From d9757c7c0cf941edc5d7c7c151251a01a32b635a Mon Sep 17 00:00:00 2001 From: chenglei Date: Mon, 12 Jun 2023 10:39:17 +0800 Subject: [PATCH 001/514] =?UTF-8?q?HBASE-27923=20NettyRpcServer=20may=20ha?= =?UTF-8?q?nge=20if=20it=20should=20skip=20initial=20sasl=20h=E2=80=A6=20(?= =?UTF-8?q?#5281)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Duo Zhang Signed-off-by: Wellington Chevreuil --- .../NettyHBaseSaslRpcClientHandler.java | 15 +- .../ipc/NettyHBaseSaslRpcServerHandler.java | 8 +- .../hadoop/hbase/ipc/NettyRpcServer.java | 6 +- .../ipc/NettyRpcServerPreambleHandler.java | 5 +- .../ipc/NettyRpcServerResponseEncoder.java | 2 + .../hbase/ipc/NettyServerRpcConnection.java | 9 +- .../ipc/TestRpcSkipInitialSaslHandshake.java | 176 ++++++++++++++++++ 7 files changed, 207 insertions(+), 14 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index d627b7b6e9d9..48e631c76299 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -94,16 +94,20 @@ private void tryComplete(ChannelHandlerContext ctx) { if (LOG.isTraceEnabled()) { LOG.trace("SASL negotiation for {} is complete", provider.getSaslAuthMethod().getName()); } - ChannelPipeline p = ctx.pipeline(); - saslRpcClient.setupSaslHandler(p, HANDLER_NAME); - p.remove(SaslChallengeDecoder.class); - p.remove(this); + saslRpcClient.setupSaslHandler(ctx.pipeline(), HANDLER_NAME); + removeHandlers(ctx); setCryptoAESOption(); saslPromise.setSuccess(true); } + private void removeHandlers(ChannelHandlerContext ctx) { + ChannelPipeline p = ctx.pipeline(); + p.remove(SaslChallengeDecoder.class); + p.remove(this); + } + private void setCryptoAESOption() { boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() .equalsIgnoreCase(saslRpcClient.getSaslQOP()); @@ -158,6 +162,9 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep } else { saslPromise.tryFailure(new FallbackDisallowedException()); } + // When we switch to simple auth, we should also remove SaslChallengeDecoder and + // NettyHBaseSaslRpcClientHandler. 
+ removeHandlers(ctx); return; } LOG.trace("Reading input token size={} for processing by initSASLContext", len); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java index 387318888a00..cb7a173625e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java @@ -89,9 +89,11 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep boolean useWrap = qop != null && !"auth".equalsIgnoreCase(qop); ChannelPipeline p = ctx.pipeline(); if (useWrap) { - p.addBefore(DECODER_NAME, null, new SaslWrapHandler(saslServer::wrap)).addLast( - new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4), - new SaslUnwrapHandler(saslServer::unwrap)); + p.addBefore(DECODER_NAME, null, new SaslWrapHandler(saslServer::wrap)) + .addBefore(NettyRpcServerResponseEncoder.NAME, null, + new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4)) + .addBefore(NettyRpcServerResponseEncoder.NAME, null, + new SaslUnwrapHandler(saslServer::unwrap)); } conn.setupHandler(); p.remove(this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index dd5afe92c4e5..0b7badf7d815 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -125,7 +125,11 @@ protected void initChannel(Channel ch) throws Exception { initSSL(pipeline, conf.getBoolean(HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, true)); } pipeline.addLast(NettyRpcServerPreambleHandler.DECODER_NAME, preambleDecoder) - .addLast(createNettyRpcServerPreambleHandler()); + .addLast(createNettyRpcServerPreambleHandler()) + // We need NettyRpcServerResponseEncoder here because NettyRpcServerPreambleHandler may + // send RpcResponse to client. 
+ .addLast(NettyRpcServerResponseEncoder.NAME, + new NettyRpcServerResponseEncoder(metrics)); } }); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index ca25dea17fe2..8269bbc60d88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -58,8 +58,9 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep LengthFieldBasedFrameDecoder decoder = new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4); decoder.setSingleDecode(true); - p.addLast(NettyHBaseSaslRpcServerHandler.DECODER_NAME, decoder); - p.addLast(new NettyHBaseSaslRpcServerHandler(rpcServer, conn)); + p.addBefore(NettyRpcServerResponseEncoder.NAME, NettyHBaseSaslRpcServerHandler.DECODER_NAME, + decoder).addBefore(NettyRpcServerResponseEncoder.NAME, null, + new NettyHBaseSaslRpcServerHandler(rpcServer, conn)); } else { conn.setupHandler(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java index 30f8dba236a5..d3e338ffce02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerResponseEncoder.java @@ -31,6 +31,8 @@ @InterfaceAudience.Private class NettyRpcServerResponseEncoder extends ChannelOutboundHandlerAdapter { + static final String NAME = "NettyRpcServerResponseEncoder"; + private final MetricsHBaseServer metrics; NettyRpcServerResponseEncoder(MetricsHBaseServer metrics) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java index 54c105802c55..f52357539dec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java @@ -71,9 +71,10 @@ class NettyServerRpcConnection extends ServerRpcConnection { void setupHandler() { channel.pipeline() - .addLast("frameDecoder", new NettyRpcFrameDecoder(rpcServer.maxRequestSize, this)) - .addLast("decoder", new NettyRpcServerRequestDecoder(rpcServer.metrics, this)) - .addLast("encoder", new NettyRpcServerResponseEncoder(rpcServer.metrics)); + .addBefore(NettyRpcServerResponseEncoder.NAME, "frameDecoder", + new NettyRpcFrameDecoder(rpcServer.maxRequestSize, this)) + .addBefore(NettyRpcServerResponseEncoder.NAME, "decoder", + new NettyRpcServerRequestDecoder(rpcServer.metrics, this)); } void process(ByteBuf buf) throws IOException, InterruptedException { @@ -115,6 +116,6 @@ public NettyServerCall createCall(int id, final BlockingService service, @Override protected void doRespond(RpcResponse resp) { - channel.writeAndFlush(resp); + NettyFutureUtils.safeWriteAndFlush(channel, resp); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java new file mode 100644 index 000000000000..9f6b7d54430b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java @@ 
-0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.SERVICE; +import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.newBlockingStub; +import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getKeytabFileForTesting; +import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getPrincipalForTesting; +import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.loginKerberosPrincipal; +import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.setSecuredConfiguration; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; +import org.apache.hadoop.hbase.security.SecurityInfo; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; + +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface; + +@Category({ RPCTests.class, MediumTests.class }) +public class TestRpcSkipInitialSaslHandshake { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRpcSkipInitialSaslHandshake.class); + + protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + protected static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); + + protected static MiniKdc KDC; + protected static String HOST = "localhost"; + protected static String PRINCIPAL; + + protected String krbKeytab; + protected String krbPrincipal; + protected UserGroupInformation ugi; + protected Configuration 
clientConf; + protected Configuration serverConf; + + protected static void initKDCAndConf() throws Exception { + KDC = TEST_UTIL.setupMiniKdc(KEYTAB_FILE); + PRINCIPAL = "hbase/" + HOST; + KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL); + HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm()); + // set a smaller timeout and retry to speed up tests + TEST_UTIL.getConfiguration().setInt(RpcClient.SOCKET_TIMEOUT_READ, 2000000000); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxretries", 1); + } + + protected static void stopKDC() throws InterruptedException { + if (KDC != null) { + KDC.stop(); + } + } + + protected final void setUpPrincipalAndConf() throws Exception { + krbKeytab = getKeytabFileForTesting(); + krbPrincipal = getPrincipalForTesting(); + ugi = loginKerberosPrincipal(krbKeytab, krbPrincipal); + clientConf = new Configuration(TEST_UTIL.getConfiguration()); + setSecuredConfiguration(clientConf); + clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true); + serverConf = new Configuration(TEST_UTIL.getConfiguration()); + } + + @BeforeClass + public static void setUp() throws Exception { + initKDCAndConf(); + } + + @AfterClass + public static void tearDown() throws Exception { + stopKDC(); + TEST_UTIL.cleanupTestDir(); + } + + @Before + public void setUpTest() throws Exception { + setUpPrincipalAndConf(); + } + + /** + * This test is for HBASE-27923,which NettyRpcServer may hange if it should skip initial sasl + * handshake. + */ + @Test + public void test() throws Exception { + SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); + Mockito.when(securityInfoMock.getServerPrincipal()) + .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); + + final AtomicBoolean useSaslRef = new AtomicBoolean(false); + NettyRpcServer rpcServer = new NettyRpcServer(null, getClass().getSimpleName(), + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress(HOST, 0), serverConf, new FifoRpcScheduler(serverConf, 1), true) { + + @Override + protected NettyRpcServerPreambleHandler createNettyRpcServerPreambleHandler() { + return new NettyRpcServerPreambleHandler(this) { + private NettyServerRpcConnection conn; + + @Override + protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { + super.channelRead0(ctx, msg); + useSaslRef.set(conn.useSasl); + + } + + @Override + protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { + conn = super.createNettyServerRpcConnection(channel); + return conn; + } + }; + } + }; + + rpcServer.start(); + try (NettyRpcClient rpcClient = + new NettyRpcClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString(), null, null)) { + BlockingInterface stub = newBlockingStub(rpcClient, rpcServer.getListenerAddress(), + User.create(UserGroupInformation.getCurrentUser())); + + String response = + stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage("test").build()) + .getMessage(); + assertTrue("test".equals(response)); + assertFalse(useSaslRef.get()); + + } finally { + rpcServer.stop(); + } + } +} From ddc67527f3c79bfb98458f6b2df0564d6c1b77af Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Tue, 13 Jun 2023 13:03:57 -0700 Subject: [PATCH 002/514] HBASE-27925 Update downloads.xml for release 2.5.5 Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) 
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 8ed218e9c24b..d2627fddb336 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -70,26 +70,26 @@ under the License. - 2.5.4 + 2.5.5 - 2023/04/14 + 2023/06/13 - 2.5.4 vs 2.5.3 + 2.5.5 vs 2.5.4 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc)
- hadoop3-bin (sha512 asc)
- hadoop3-client-bin (sha512 asc)
+ src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc)
+ hadoop3-bin (sha512 asc)
+ hadoop3-client-bin (sha512 asc) From 68da890dca58b45104d1705ebd9c233cd912497d Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Thu, 15 Jun 2023 12:48:36 +0200 Subject: [PATCH 003/514] HBASE-27933 Update stable version to 2.5.x (#5291) The actual stable version was changed in HBASE-27849, but this link was forgotten. Signed-off-by: Peter Somogyi --- src/site/xdoc/downloads.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index d2627fddb336..6cb2246f0137 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -91,7 +91,7 @@ under the License. hadoop3-bin (sha512 asc)
hadoop3-client-bin (sha512 asc)
-
+ stable release
@@ -114,7 +114,7 @@ under the License.
bin (sha512 asc)
client-bin (sha512 asc) - stable release + From 4be74d2455a172c7c8a76d30c80e5864451e9624 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 15 Jun 2023 21:56:34 +0800 Subject: [PATCH 004/514] HBASE-27917 Set version to 4.0.0-alpha-1-SNAPSHOT on master (#5276) Signed-off-by: Liangjun He --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 7ec4a1dc9d9a..1503a95266f7 100644 --- a/pom.xml +++ b/pom.xml @@ -773,7 +773,7 @@ - 3.0.0-beta-1-SNAPSHOT + 4.0.0-alpha-1-SNAPSHOT false From f534d828e950f8d1312c61e4d1dbd5f82fa92630 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Fri, 16 Jun 2023 08:11:47 -0700 Subject: [PATCH 005/514] HBASE-27894 create-release is broken by recent gitbox changes (#5262) Use the github webui to retrieve the project pom from the build branch instead of gitbox. Signed-off-by: Duo Zhang Signed-off-by: Xiaolin Ha --- dev-support/create-release/release-util.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 787ec4dac181..3a1b38644f85 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -146,12 +146,12 @@ function get_release_info { if [[ -z "${ASF_REPO}" ]]; then ASF_REPO="https://gitbox.apache.org/repos/asf/${PROJECT}.git" fi - if [[ -z "${ASF_REPO_WEBUI}" ]]; then - ASF_REPO_WEBUI="https://gitbox.apache.org/repos/asf?p=${PROJECT}.git" - fi if [[ -z "${ASF_GITHUB_REPO}" ]]; then ASF_GITHUB_REPO="https://github.com/apache/${PROJECT}" fi + if [[ -z "${ASF_GITHUB_WEBUI}" ]] ; then + ASF_GITHUB_WEBUI="https://raw.githubusercontent.com/apache/${PROJECT}" + fi if [ -z "$GIT_BRANCH" ]; then # If no branch is specified, find out the latest branch from the repo. GIT_BRANCH="$(git ls-remote --heads "$ASF_REPO" | @@ -167,14 +167,14 @@ function get_release_info { # Find the current version for the branch. local version - version="$(curl -s "$ASF_REPO_WEBUI;a=blob_plain;f=pom.xml;hb=refs/heads/$GIT_BRANCH" | + version="$(curl -s "$ASF_GITHUB_WEBUI/refs/heads/$GIT_BRANCH/pom.xml" | parse_version)" # We do not want to expand ${revision} here, see https://maven.apache.org/maven-ci-friendly.html # If we use ${revision} as placeholder, we need to parse the revision property to # get maven version # shellcheck disable=SC2016 if [[ "${version}" == '${revision}' ]]; then - version="$(curl -s "$ASF_REPO_WEBUI;a=blob_plain;f=pom.xml;hb=refs/heads/$GIT_BRANCH" | + version="$(curl -s "$ASF_GITHUB_WEBUI/refs/heads/$GIT_BRANCH/pom.xml" | parse_revision)" fi log "Current branch VERSION is $version." 
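As a sketch of what the new lookup does (using hbase and master purely as illustrative values for ${PROJECT} and $GIT_BRANCH), get_release_info now pulls the branch pom straight from the GitHub raw-content host, e.g.

curl -s "https://raw.githubusercontent.com/apache/hbase/refs/heads/master/pom.xml" | parse_version

instead of going through the gitbox web UI URL that was removed above.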
From 663bc642b6d6b4e364bdeddcf197a0fa2fd8e228 Mon Sep 17 00:00:00 2001 From: chaijunjie0101 <64140218+chaijunjie0101@users.noreply.github.com> Date: Fri, 16 Jun 2023 23:15:24 +0800 Subject: [PATCH 006/514] HBASE-27888 Record readBlock message in log when it takes too long time (#5255) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/io/hfile/HFileBlock.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index c84836bcd532..434529ec46f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -1385,6 +1385,13 @@ static class FSReaderImpl implements FSReader { private final boolean isPreadAllBytes; + private final long readWarnTime; + + /** + * If reading block cost time in milliseconds more than the threshold, a warning will be logged. + */ + public static final String FS_READER_WARN_TIME_MS = "hbase.fs.reader.warn.time.ms"; + FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, ByteBuffAllocator allocator, Configuration conf) throws IOException { this.fileSize = readerContext.getFileSize(); @@ -1402,6 +1409,8 @@ static class FSReaderImpl implements FSReader { defaultDecodingCtx = new HFileBlockDefaultDecodingContext(conf, fileContext); encodedBlockDecodingCtx = defaultDecodingCtx; isPreadAllBytes = readerContext.isPreadAllBytes(); + // Default warn threshold set to -1, it means skipping record the read block slow warning log. + readWarnTime = conf.getLong(FS_READER_WARN_TIME_MS, -1L); } @Override @@ -1759,6 +1768,10 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, hFileBlock.sanityCheckUncompressed(); } LOG.trace("Read {} in {} ms", hFileBlock, duration); + if (!LOG.isTraceEnabled() && this.readWarnTime >= 0 && duration > this.readWarnTime) { + LOG.warn("Read Block Slow: read {} cost {} ms, threshold = {} ms", hFileBlock, duration, + this.readWarnTime); + } span.addEvent("Read block", attributesBuilder.build()); // Cache next block header if we read it for the next time through here. 
if (nextBlockOnDiskSize != -1) { From 0703d36daf8dd5c36164419032ff0760bb3f65cc Mon Sep 17 00:00:00 2001 From: chenglei Date: Fri, 16 Jun 2023 23:36:23 +0800 Subject: [PATCH 007/514] =?UTF-8?q?HBASE-27924=20Remove=20duplicate=20code?= =?UTF-8?q?=20for=20NettyHBaseSaslRpcServerHandler=20=E2=80=A6=20(#5285)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: comnetwork Signed-off-by: Duo Zhang --- .../ipc/NettyHBaseSaslRpcServerHandler.java | 25 +-- .../ipc/TestSecurityRpcSentBytesMetrics.java | 155 ++++++++++++++++++ 2 files changed, 157 insertions(+), 23 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecurityRpcSentBytesMetrics.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java index cb7a173625e1..dd6f84daae3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java @@ -17,20 +17,16 @@ */ package org.apache.hadoop.hbase.ipc; -import java.io.IOException; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; import org.apache.hadoop.hbase.security.SaslStatus; import org.apache.hadoop.hbase.security.SaslUnwrapHandler; import org.apache.hadoop.hbase.security.SaslWrapHandler; import org.apache.hadoop.hbase.util.NettyFutureUtils; import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; @@ -54,23 +50,6 @@ class NettyHBaseSaslRpcServerHandler extends SimpleChannelInboundHandler Date: Fri, 16 Jun 2023 23:53:33 +0800 Subject: [PATCH 008/514] HBASE-27939 Bump snappy-java from 1.1.9.1 to 1.1.10.1 (#5292) Bumps [snappy-java](https://github.com/xerial/snappy-java) from 1.1.9.1 to 1.1.10.1. - [Release notes](https://github.com/xerial/snappy-java/releases) - [Commits](https://github.com/xerial/snappy-java/compare/v1.1.9.1...v1.1.10.1) --- updated-dependencies: - dependency-name: org.xerial.snappy:snappy-java dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1503a95266f7..7958e5c221f2 100644 --- a/pom.xml +++ b/pom.xml @@ -889,7 +889,7 @@ 0.24 1.11.0 1.8.0 - 1.1.9.1 + 1.1.10.1 1.9 1.5.5-2 4.1.4 From 622f4ae8628cffc60515b1c6a699b0fa3b04131e Mon Sep 17 00:00:00 2001 From: Himanshu Gwalani Date: Fri, 16 Jun 2023 22:32:00 +0530 Subject: [PATCH 009/514] HBASE-27904: A random data generator tool leveraging hbase bulk load (#5280) Signed-off-by: Viraj Jasani --- .../BulkDataGeneratorInputFormat.java | 87 +++++ .../BulkDataGeneratorMapper.java | 138 ++++++++ .../BulkDataGeneratorRecordReader.java | 75 +++++ .../BulkDataGeneratorTool.java | 301 ++++++++++++++++++ .../hbase/util/bulkdatagenerator/Utility.java | 102 ++++++ .../_chapters/bulk_data_generator_tool.adoc | 132 ++++++++ src/main/asciidoc/book.adoc | 1 + 7 files changed, 836 insertions(+) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorInputFormat.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorMapper.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorRecordReader.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorTool.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/Utility.java create mode 100644 src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorInputFormat.java new file mode 100644 index 000000000000..f40951e945df --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorInputFormat.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util.bulkdatagenerator; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +public class BulkDataGeneratorInputFormat extends InputFormat { + + public static final String MAPPER_TASK_COUNT_KEY = + BulkDataGeneratorInputFormat.class.getName() + "mapper.task.count"; + + @Override + public List getSplits(JobContext job) throws IOException { + // Get the number of mapper tasks configured + int mapperCount = job.getConfiguration().getInt(MAPPER_TASK_COUNT_KEY, -1); + Preconditions.checkArgument(mapperCount > 1, MAPPER_TASK_COUNT_KEY + " is not set."); + + // Create a number of input splits equal to the number of mapper tasks + ArrayList splits = new ArrayList(); + for (int i = 0; i < mapperCount; ++i) { + splits.add(new FakeInputSplit()); + } + return splits; + } + + @Override + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { + BulkDataGeneratorRecordReader bulkDataGeneratorRecordReader = + new BulkDataGeneratorRecordReader(); + bulkDataGeneratorRecordReader.initialize(split, context); + return bulkDataGeneratorRecordReader; + } + + /** + * Dummy input split to be used by {@link BulkDataGeneratorRecordReader} + */ + private static class FakeInputSplit extends InputSplit implements Writable { + + @Override + public void readFields(DataInput arg0) throws IOException { + } + + @Override + public void write(DataOutput arg0) throws IOException { + } + + @Override + public long getLength() throws IOException, InterruptedException { + return 0; + } + + @Override + public String[] getLocations() throws IOException, InterruptedException { + return new String[0]; + } + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorMapper.java new file mode 100644 index 000000000000..35f8b9c471e5 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorMapper.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util.bulkdatagenerator; + +import java.io.IOException; +import java.math.BigDecimal; +import java.util.List; +import java.util.Map; +import java.util.Random; +import org.apache.commons.math3.util.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.Mapper; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +public class BulkDataGeneratorMapper + extends Mapper { + + /** Counter enumeration to count number of rows generated. */ + public static enum Counters { + ROWS_GENERATED + } + + public static final String SPLIT_COUNT_KEY = + BulkDataGeneratorMapper.class.getName() + "split.count"; + + private static final String ORG_ID = "00D000000000062"; + private static final int MAX_EVENT_ID = Integer.MAX_VALUE; + private static final int MAX_VEHICLE_ID = 100; + private static final int MAX_SPEED_KPH = 140; + private static final int NUM_LOCATIONS = 10; + private static int splitCount = 1; + private static final Random random = new Random(System.currentTimeMillis()); + private static final Map> LOCATIONS = + Maps.newHashMapWithExpectedSize(NUM_LOCATIONS); + private static final List LOCATION_KEYS = Lists.newArrayListWithCapacity(NUM_LOCATIONS); + static { + LOCATIONS.put("Belém", new Pair<>(BigDecimal.valueOf(-01.45), BigDecimal.valueOf(-48.48))); + LOCATIONS.put("Brasília", new Pair<>(BigDecimal.valueOf(-15.78), BigDecimal.valueOf(-47.92))); + LOCATIONS.put("Campinas", new Pair<>(BigDecimal.valueOf(-22.90), BigDecimal.valueOf(-47.05))); + LOCATIONS.put("Cuiaba", new Pair<>(BigDecimal.valueOf(-07.25), BigDecimal.valueOf(-58.42))); + LOCATIONS.put("Manaus", new Pair<>(BigDecimal.valueOf(-03.10), BigDecimal.valueOf(-60.00))); + LOCATIONS.put("Porto Velho", + new Pair<>(BigDecimal.valueOf(-08.75), BigDecimal.valueOf(-63.90))); + LOCATIONS.put("Recife", new Pair<>(BigDecimal.valueOf(-08.10), BigDecimal.valueOf(-34.88))); + LOCATIONS.put("Rio de Janeiro", + new Pair<>(BigDecimal.valueOf(-22.90), BigDecimal.valueOf(-43.23))); + LOCATIONS.put("Santarém", new Pair<>(BigDecimal.valueOf(-02.43), BigDecimal.valueOf(-54.68))); + LOCATIONS.put("São Paulo", new Pair<>(BigDecimal.valueOf(-23.53), BigDecimal.valueOf(-46.62))); + LOCATION_KEYS.addAll(LOCATIONS.keySet()); + } + + final static byte[] COLUMN_FAMILY_BYTES = Utility.COLUMN_FAMILY.getBytes(); + + /** {@inheritDoc} */ + @Override + protected void setup(Context context) throws IOException, InterruptedException { + Configuration c = context.getConfiguration(); + splitCount = c.getInt(SPLIT_COUNT_KEY, 1); + } + + /** + * Generates a single record based on value set to the key by + * {@link BulkDataGeneratorRecordReader#getCurrentKey()}. + * {@link Utility.TableColumnNames#TOOL_EVENT_ID} is first part of row key. Keeping first + * {@link Utility#SPLIT_PREFIX_LENGTH} characters as index of the record to be generated ensures + * that records are equally distributed across all regions of the table since region boundaries + * are generated in similar fashion. Check {@link Utility#createTable(Admin, String, int, Map)} + * method for region split info. 
+ * @param key - The key having index of next record to be generated + * @param value - Value associated with the key (not used) + * @param context - Context of the mapper container + */ + @Override + protected void map(Text key, NullWritable value, Context context) + throws IOException, InterruptedException { + + int recordIndex = Integer.parseInt(key.toString()); + + // <6-characters-region-boundary-prefix>_<15-random-chars>_ + final String toolEventId = + String.format("%0" + Utility.SPLIT_PREFIX_LENGTH + "d", recordIndex % (splitCount + 1)) + "_" + + EnvironmentEdgeManager.currentTime() + (1e14 + (random.nextFloat() * 9e13)) + "_" + + recordIndex; + final String eventId = String.valueOf(Math.abs(random.nextInt(MAX_EVENT_ID))); + final String vechileId = String.valueOf(Math.abs(random.nextInt(MAX_VEHICLE_ID))); + final String speed = String.valueOf(Math.abs(random.nextInt(MAX_SPEED_KPH))); + final String location = LOCATION_KEYS.get(random.nextInt(NUM_LOCATIONS)); + final Pair coordinates = LOCATIONS.get(location); + final BigDecimal latitude = coordinates.getFirst(); + final BigDecimal longitude = coordinates.getSecond(); + + final ImmutableBytesWritable hKey = + new ImmutableBytesWritable(String.format("%s:%s", toolEventId, ORG_ID).getBytes()); + addKeyValue(context, hKey, Utility.TableColumnNames.ORG_ID, ORG_ID); + addKeyValue(context, hKey, Utility.TableColumnNames.TOOL_EVENT_ID, toolEventId); + addKeyValue(context, hKey, Utility.TableColumnNames.EVENT_ID, eventId); + addKeyValue(context, hKey, Utility.TableColumnNames.VEHICLE_ID, vechileId); + addKeyValue(context, hKey, Utility.TableColumnNames.SPEED, speed); + addKeyValue(context, hKey, Utility.TableColumnNames.LATITUDE, latitude.toString()); + addKeyValue(context, hKey, Utility.TableColumnNames.LONGITUDE, longitude.toString()); + addKeyValue(context, hKey, Utility.TableColumnNames.LOCATION, location); + addKeyValue(context, hKey, Utility.TableColumnNames.TIMESTAMP, + String.valueOf(EnvironmentEdgeManager.currentTime())); + + context.getCounter(Counters.ROWS_GENERATED).increment(1); + } + + private void addKeyValue(final Context context, ImmutableBytesWritable key, + final Utility.TableColumnNames columnName, final String value) + throws IOException, InterruptedException { + KeyValue kv = + new KeyValue(key.get(), COLUMN_FAMILY_BYTES, columnName.getColumnName(), value.getBytes()); + context.write(key, kv); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorRecordReader.java new file mode 100644 index 000000000000..f4ecc659e51b --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorRecordReader.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util.bulkdatagenerator; + +import java.io.IOException; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +public class BulkDataGeneratorRecordReader extends RecordReader { + + private int numRecordsToCreate = 0; + private int createdRecords = 0; + private Text key = new Text(); + private NullWritable value = NullWritable.get(); + + public static final String RECORDS_PER_MAPPER_TASK_KEY = + BulkDataGeneratorInputFormat.class.getName() + "records.per.mapper.task"; + + @Override + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { + // Get the number of records to create from the configuration + this.numRecordsToCreate = context.getConfiguration().getInt(RECORDS_PER_MAPPER_TASK_KEY, -1); + Preconditions.checkArgument(numRecordsToCreate > 0, + "Number of records to be created by per mapper should be greater than 0."); + } + + @Override + public boolean nextKeyValue() { + createdRecords++; + return createdRecords <= numRecordsToCreate; + } + + @Override + public Text getCurrentKey() { + // Set the index of record to be created + key.set(String.valueOf(createdRecords)); + return key; + } + + @Override + public NullWritable getCurrentValue() { + return value; + } + + @Override + public float getProgress() throws IOException, InterruptedException { + return (float) createdRecords / (float) numRecordsToCreate; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorTool.java new file mode 100644 index 000000000000..befa1486dec4 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/BulkDataGeneratorTool.java @@ -0,0 +1,301 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util.bulkdatagenerator; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2; +import org.apache.hadoop.hbase.tool.BulkLoadHFiles; +import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.util.GenericOptionsParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; +import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Parser; + +/** + * A command line utility to generate pre-splitted HBase Tables with large amount (TBs) of random + * data, equally distributed among all regions. + */ +public class BulkDataGeneratorTool { + + private static final Logger logger = LoggerFactory.getLogger(BulkDataGeneratorTool.class); + + /** + * Prefix for the generated HFiles directory + */ + private static final String OUTPUT_DIRECTORY_PREFIX = "/bulk_data_generator/"; + + /** + * Number of mapper container to be launched for generating of HFiles + */ + private int mapperCount; + + /** + * Number of rows to be generated by each mapper + */ + private long rowsPerMapper; + + /** + * Table for which random data needs to be generated + */ + private String table; + + /** + * Number of splits for the {@link #table}. Number of regions for the table will be + * ({@link #splitCount} + 1). 
+ */ + private int splitCount; + + /** + * Flag to delete the table (before creating) if it already exists + */ + private boolean deleteTableIfExist; + + /** + * Additional HBase meta-data options to be set for the table + */ + private final Map tableOptions = new HashMap<>(); + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + BulkDataGeneratorTool bulkDataGeneratorTool = new BulkDataGeneratorTool(); + bulkDataGeneratorTool.run(conf, args); + } + + public boolean run(Configuration conf, String[] args) throws IOException { + // Read CLI arguments + CommandLine line = null; + try { + Parser parser = new GnuParser(); + line = parser.parse(getOptions(), args); + readCommandLineParameters(conf, line); + } catch (ParseException | IOException exception) { + logger.error("Error while parsing CLI arguments.", exception); + printUsage(); + return false; + } + + if (line.hasOption("-h")) { + printUsage(); + return true; + } + + Preconditions.checkArgument(!StringUtils.isEmpty(table), "Table name must not be empty"); + Preconditions.checkArgument(mapperCount > 0, "Mapper count must be greater than 0"); + Preconditions.checkArgument((splitCount > 0) && (splitCount < Utility.MAX_SPLIT_COUNT), + "Split count must be greater than 0 and less than " + Utility.MAX_SPLIT_COUNT); + Preconditions.checkArgument(rowsPerMapper > 0, "Rows per mapper must be greater than 0"); + + Path outputDirectory = generateOutputDirectory(); + logger.info("HFiles will be generated at " + outputDirectory.toString()); + + try (Connection connection = ConnectionFactory.createConnection(conf)) { + final Admin admin = connection.getAdmin(); + final TableName tableName = TableName.valueOf(table); + if (admin.tableExists(tableName)) { + if (deleteTableIfExist) { + logger.info( + "Deleting the table since it already exist and delete-if-exist flag is set to true"); + Utility.deleteTable(admin, table); + } else { + logger.info("Table already exists, cannot generate HFiles for existing table."); + return false; + } + } + + // Creating the pre-split table + Utility.createTable(admin, table, splitCount, tableOptions); + logger.info(table + " created successfully"); + + Job job = createSubmittableJob(conf); + + Table hbaseTable = connection.getTable(tableName); + + // Auto configure partitioner and reducer + HFileOutputFormat2.configureIncrementalLoad(job, hbaseTable, hbaseTable.getRegionLocator()); + + FileOutputFormat.setOutputPath(job, outputDirectory); + + boolean result = job.waitForCompletion(true); + + if (result) { + logger.info("HFiles generated successfully. 
Starting bulk load to " + table); + BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(conf); + Map bulkLoadedHFiles = + bulkLoadHFilesTool.bulkLoad(tableName, outputDirectory); + boolean status = !bulkLoadedHFiles.isEmpty(); + logger.info("BulkLoadHFiles finished successfully with status " + status); + return status; + } else { + logger.info("Failed to generate HFiles."); + return false; + } + } catch (Exception e) { + logger.error("Failed to generate data", e); + return false; + } finally { + FileSystem.get(conf).deleteOnExit(outputDirectory); + } + } + + protected Job createSubmittableJob(Configuration conf) throws IOException { + + conf.setInt(BulkDataGeneratorMapper.SPLIT_COUNT_KEY, splitCount); + conf.setInt(BulkDataGeneratorInputFormat.MAPPER_TASK_COUNT_KEY, mapperCount); + conf.setLong(BulkDataGeneratorRecordReader.RECORDS_PER_MAPPER_TASK_KEY, rowsPerMapper); + + Job job = new Job(conf, BulkDataGeneratorTool.class.getSimpleName() + " - " + table); + + job.setJarByClass(BulkDataGeneratorMapper.class); + job.setInputFormatClass(BulkDataGeneratorInputFormat.class); + + HBaseConfiguration.addHbaseResources(conf); + + job.setMapperClass(BulkDataGeneratorMapper.class); + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + job.setMapOutputValueClass(KeyValue.class); + + return job; + } + + /** Returns Random output directory path where HFiles will be generated */ + protected Path generateOutputDirectory() { + final String outputDirectory = + OUTPUT_DIRECTORY_PREFIX + "/" + table + "-" + System.currentTimeMillis(); + return new Path(outputDirectory); + } + + /** + * This method parses the command line parameters into instance variables + */ + protected void readCommandLineParameters(Configuration conf, CommandLine line) + throws ParseException, IOException { + final List genericParameters = new ArrayList(); + + // Parse the generic options + for (Map.Entry entry : line.getOptionProperties("D").entrySet()) { + genericParameters.add("-D"); + genericParameters.add(entry.getKey() + "=" + entry.getValue()); + } + + logger.info( + "Parsed generic parameters: " + Arrays.toString(genericParameters.toArray(new String[0]))); + + new GenericOptionsParser(conf, genericParameters.toArray(new String[0])); + + table = line.getOptionValue("table"); + + if (line.hasOption("mapper-count")) { + mapperCount = Integer.parseInt(line.getOptionValue("mapper-count")); + } + if (line.hasOption("split-count")) { + splitCount = Integer.parseInt(line.getOptionValue("split-count")); + } + if (line.hasOption("rows-per-mapper")) { + rowsPerMapper = Long.parseLong(line.getOptionValue("rows-per-mapper")); + } + + deleteTableIfExist = line.hasOption("delete-if-exist"); + + parseTableOptions(line); + } + + private void parseTableOptions(final CommandLine line) { + final String tableOptionsAsString = line.getOptionValue("table-options"); + if (!StringUtils.isEmpty(tableOptionsAsString)) { + for (String tableOption : tableOptionsAsString.split(",")) { + final String[] keyValueSplit = tableOption.split("="); + final String key = keyValueSplit[0]; + final String value = keyValueSplit[1]; + tableOptions.put(key, value); + } + } + } + + /** Returns the command line option for {@link BulkDataGeneratorTool} */ + protected Options getOptions() { + final Options options = new Options(); + Option option = + new Option("t", "table", true, "The table name for which data need to be generated."); + options.addOption(option); + + option = new Option("d", "delete-if-exist", false, + "If it's set, the table will be deleted 
if already exist."); + options.addOption(option); + + option = + new Option("mc", "mapper-count", true, "The number of mapper containers to be launched."); + options.addOption(option); + + option = new Option("sc", "split-count", true, + "The number of regions/pre-splits to be created for the table."); + options.addOption(option); + + option = + new Option("r", "rows-per-mapper", true, "The number of rows to be generated PER mapper."); + options.addOption(option); + + option = + new Option("o", "table-options", true, "Table options to be set while creating the table."); + options.addOption(option); + + option = new Option("h", "help", false, "Show help message for the tool"); + options.addOption(option); + + return options; + } + + protected void printUsage() { + final HelpFormatter helpFormatter = new HelpFormatter(); + helpFormatter.setWidth(120); + final String helpMessageCommand = "hbase " + BulkDataGeneratorTool.class.getName(); + final String commandSyntax = helpMessageCommand + " [-D]*"; + final String helpMessageSuffix = "Examples:\n" + helpMessageCommand + + " -t TEST_TABLE -mc 10 -r 100 -sc 10\n" + helpMessageCommand + + " -t TEST_TABLE -mc 10 -r 100 -sc 10 -d -o \"BACKUP=false,NORMALIZATION_ENABLED=false\"\n" + + helpMessageCommand + " -t TEST_TABLE -mc 10 -r 100 -sc 10 -Dmapreduce.map.memory.mb=8192\n"; + helpFormatter.printHelp(commandSyntax, "", getOptions(), helpMessageSuffix); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/Utility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/Utility.java new file mode 100644 index 000000000000..3db75239a646 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/bulkdatagenerator/Utility.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util.bulkdatagenerator; + +import java.io.IOException; +import java.util.Map; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +public final class Utility { + + /** + * Schema for HBase table to be generated by generated and populated by + * {@link BulkDataGeneratorTool} + */ + public enum TableColumnNames { + ORG_ID("orgId".getBytes()), + TOOL_EVENT_ID("toolEventId".getBytes()), + EVENT_ID("eventId".getBytes()), + VEHICLE_ID("vehicleId".getBytes()), + SPEED("speed".getBytes()), + LATITUDE("latitude".getBytes()), + LONGITUDE("longitude".getBytes()), + LOCATION("location".getBytes()), + TIMESTAMP("timestamp".getBytes()); + + private final byte[] columnName; + + TableColumnNames(byte[] column) { + this.columnName = column; + } + + public byte[] getColumnName() { + return this.columnName; + } + } + + public static final String COLUMN_FAMILY = "cf"; + + public static final int SPLIT_PREFIX_LENGTH = 6; + + public static final int MAX_SPLIT_COUNT = (int) Math.pow(10, SPLIT_PREFIX_LENGTH); + + /** + * Private Constructor + */ + private Utility() { + + } + + public static void deleteTable(Admin admin, String tableName) throws IOException { + admin.disableTable(TableName.valueOf(tableName)); + admin.deleteTable(TableName.valueOf(tableName)); + } + + /** + * Creates a pre-splitted HBase Table having single column family ({@link #COLUMN_FAMILY}) and + * sequential splits with {@link #SPLIT_PREFIX_LENGTH} length character prefix. Example: If a + * table (TEST_TABLE_1) need to be generated with splitCount as 10, table would be created with + * (10+1) regions with boundaries end-keys as (000000-000001, 000001-000002, 000002-000003, ...., + * 0000010-) + * @param admin - Admin object associated with HBase connection + * @param tableName - Name of table to be created + * @param splitCount - Number of splits for the table (Number of regions will be splitCount + 1) + * @param tableOptions - Additional HBase metadata properties to be set for the table + */ + public static void createTable(Admin admin, String tableName, int splitCount, + Map tableOptions) throws IOException { + Preconditions.checkArgument(splitCount > 0, "Split count must be greater than 0"); + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); + tableOptions.forEach(tableDescriptorBuilder::setValue); + TableDescriptor tableDescriptor = tableDescriptorBuilder + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY)).build(); + // Pre-splitting table based on splitCount + byte[][] splitKeys = new byte[splitCount][]; + for (int i = 0; i < splitCount; i++) { + splitKeys[i] = String.format("%0" + Utility.SPLIT_PREFIX_LENGTH + "d", i + 1).getBytes(); + } + admin.createTable(tableDescriptor, splitKeys); + } +} diff --git a/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc new file mode 100644 index 000000000000..3ac6ca693121 --- /dev/null +++ b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc @@ -0,0 +1,132 @@ +//// +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +//// + +== Bulk Data Generator Tool +:doctype: book +:numbered: +:toc: left +:icons: font +:experimental: + +This is a random data generator tool for HBase tables leveraging Hbase bulk load. +It can create pre-splited HBase table and the generated data is *uniformly distributed* to all the regions of the table. + +=== How to Use + +[source] +---- +usage: hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool [-D]* + -d,--delete-if-exist If it's set, the table will be deleted if already exist. + -h,--help Show help message for the tool + -mc,--mapper-count The number of mapper containers to be launched. + -o,--table-options Table options to be set while creating the table. + -r,--rows-per-mapper The number of rows to be generated PER mapper. + -sc,--split-count The number of regions/pre-splits to be created for the table. + -t,--table The table name for which data need to be generated. +---- + +---- +Examples: + +hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 + +hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 -d -o "BACKUP=false,NORMALIZATION_ENABLED=false" + +hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 -Dmapreduce.map.memory.mb=8192 +---- + +=== How it Works + +==== Table Schema +Tool generates a HBase table with single column family, i.e. *cf* and 9 columns i.e. +---- +ORG_ID, TOOL_EVENT_ID, EVENT_ID, VEHICLE_ID, SPEED, LATITUDE, LONGITUDE, LOCATION, TIMESTAMP +---- +with row key as +---- +: +---- + +==== Table Creation +Tool creates a pre-splited HBase Table having "*split-count*" splits (i.e. *split-count* + 1 regions) with sequential 6 digit region boundary prefix. +Example: If a table is generated with "*split-count*" as 10, it will have (10+1) regions with following start-end keys. +---- +(-000001, 000001-000002, 000002-000003, ...., 000009-000010, 0000010-) +---- + +==== Data Generation +Tool creates and run a MR job to generate the HFiles, which are bulk loaded to table regions via `org.apache.hadoop.hbase.tool.BulkLoadHFilesTool`. +The number of mappers is defined in input as "*mapper-count*". Each mapper generates "*records-per-mapper*" rows. + +`org.apache.hadoop.hbase.util.bulkdatageneratorBulkDataGeneratorRecordReader` ensures that each record generated by mapper is associated with index (added to the key) ranging from 1 to "*records-per-mapper*". 
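Each such index is then turned into one row by BulkDataGeneratorMapper. As an illustrative sketch only (the middle segment comes from the current timestamp plus random digits, and the trailing org id is the constant 00D000000000062 hard-coded in the mapper), a generated row key has the shape

----
<6-digit-region-boundary-prefix>_<timestamp-and-random-digits>_<record-index>:<org-id>
----

with TOOL_EVENT_ID forming everything before the colon.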
+ +The TOOL_EVENT_ID column for each row has a 6 digit prefix as +---- +(index) mod ("split-count" + 1) +---- +Example, if 10 records are to be generated by each mapper and "*split-count*" is 4, the TOOL_EVENT_IDs for each record will have a prefix as +[options="header"] +|=== +|Record Index|TOOL_EVENT_ID's first six characters +//---------------------- +|1|000001 +|2|000002 +|3|000003 +|4|000004 +|5|000000 +|6|000001 +|7|000002 +|8|000003 +|9|000004 +|10|000005 +|=== +Since TOOL_EVENT_ID is first attribute of row key and table region boundaries are also having start-end keys as 6 digit sequential prefixes, this ensures that each mapper generates (nearly) same number of rows for each region, making the data uniformly distributed. +TOOL_EVENT_ID suffix and other columns of the row are populated with random data. + +Number of rows generated is +---- +rows-per-mapper * mapper-count +---- + +Size of each rows is (approximately) +---- +850 B +---- + +=== Experiments +These results are from a 11 node cluster having HBase and Hadoop service running within self-managed test environment +[options="header"] +|=== +|Data Size|Time to Generate Data (mins) +//---------------------- +|100 GB|6 minutes +|340 GB|13 minutes +|3.5 TB|3 hours 10 minutes +|=== + + +:numbered: + +ifdef::backend-docbook[] +[index] +== Index +// Generated automatically by the DocBook toolchain. +endif::backend-docbook[] diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc index b8c648e8bb6c..f02f5000c78c 100644 --- a/src/main/asciidoc/book.adoc +++ b/src/main/asciidoc/book.adoc @@ -90,6 +90,7 @@ include::_chapters/community.adoc[] include::_chapters/hbtop.adoc[] include::_chapters/tracing.adoc[] include::_chapters/store_file_tracking.adoc[] +include::_chapters/bulk_data_generator_tool.adoc[] = Appendix From b9a244cf2707cd27dc4366e20ed7f1de980a6638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E5=86=B2?= Date: Sat, 17 Jun 2023 15:41:15 +0800 Subject: [PATCH 010/514] HBASE-27937 Update getting_started.adoc (#5290) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/getting_started.adoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index 3a85f90d969f..bf501d47c699 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -371,13 +371,13 @@ The following command starts four additional RegionServers, running on sequentia + ---- -$ .bin/local-regionservers.sh start 2 3 4 5 +$ ./bin/local-regionservers.sh start 2 3 4 5 ---- + To stop a RegionServer manually, use the `local-regionservers.sh` command with the `stop` parameter and the offset of the server to stop. + ---- -$ .bin/local-regionservers.sh stop 3 +$ ./bin/local-regionservers.sh stop 3 ---- . Stop HBase. 
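A minimal usage sketch of the `Utility.createTable` helper added earlier in this series; the class name, configuration and connection handling shown here are assumptions for illustration and are not part of any patch:

[source,java]
----
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.bulkdatagenerator.Utility;

// Illustrative sketch: create the pre-split table described in the chapter above,
// with 10 splits (10 + 1 regions) and no additional table options.
public class CreatePreSplitTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      Utility.createTable(admin, "TEST_TABLE_1", 10, Collections.emptyMap());
    }
  }
}
----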
From cd3f94df4d919637591e3fda3923c16841602350 Mon Sep 17 00:00:00 2001 From: chenglei Date: Sat, 17 Jun 2023 17:55:05 +0800 Subject: [PATCH 011/514] =?UTF-8?q?HBASE-27940=20Midkey=20metadata=20in=20?= =?UTF-8?q?root=20index=20block=20would=20always=20be=20ignor=E2=80=A6=20(?= =?UTF-8?q?#5296)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Duo Zhang --- .../hadoop/hbase/io/hfile/HFileBlock.java | 103 ++++++++++-------- .../hbase/io/hfile/HFileBlockIndex.java | 8 +- .../hbase/io/hfile/NoOpIndexBlockEncoder.java | 8 +- .../hbase/io/hfile/TestHFileBlockIndex.java | 97 +++++++++++++++++ 4 files changed, 164 insertions(+), 52 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 434529ec46f8..b4bb2fb2c900 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -172,7 +172,7 @@ static class Header { * {@link #onDiskSizeWithoutHeader} when using HDFS checksum. * @see Writer#putHeader(byte[], int, int, int, int) */ - private int onDiskDataSizeWithHeader; + private final int onDiskDataSizeWithHeader; // End of Block Header fields. /** @@ -188,13 +188,15 @@ static class Header { * ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. So, * we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be good if * could be confined to cache-use only but hard-to-do. + *
+ * NOTE: this byteBuff including HFileBlock header and data, but excluding checksum. */ - private ByteBuff buf; + private ByteBuff bufWithoutChecksum; /** * Meta data that holds meta information on the hfileblock. */ - private HFileContext fileContext; + private final HFileContext fileContext; /** * The offset of this block in the file. Populated by the reader for convenience of access. This @@ -296,6 +298,8 @@ public int getDeserializerIdentifier() { CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER); } + private final int totalChecksumBytes; + /** * Creates a new {@link HFile} block from the given fields. This constructor is used only while * writing blocks and caching, and is sitting in a byte buffer and we want to stuff the block into @@ -332,11 +336,12 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, this.nextBlockOnDiskSize = nextBlockOnDiskSize; this.fileContext = fileContext; this.allocator = allocator; - this.buf = buf; + this.bufWithoutChecksum = buf; if (fillHeader) { overwriteHeader(); } - this.buf.rewind(); + this.bufWithoutChecksum.rewind(); + this.totalChecksumBytes = computeTotalChecksumBytes(); } /** @@ -411,12 +416,12 @@ public BlockType getBlockType() { @Override public int refCnt() { - return buf.refCnt(); + return bufWithoutChecksum.refCnt(); } @Override public HFileBlock retain() { - buf.retain(); + bufWithoutChecksum.retain(); return this; } @@ -426,7 +431,7 @@ public HFileBlock retain() { */ @Override public boolean release() { - return buf.release(); + return bufWithoutChecksum.release(); } /** @@ -441,7 +446,7 @@ public HFileBlock touch() { @Override public HFileBlock touch(Object hint) { - buf.touch(hint); + bufWithoutChecksum.touch(hint); return this; } @@ -451,7 +456,7 @@ short getDataBlockEncodingId() { throw new IllegalArgumentException("Querying encoder ID of a block " + "of type other than " + BlockType.ENCODED_DATA + ": " + blockType); } - return buf.getShort(headerSize()); + return bufWithoutChecksum.getShort(headerSize()); } /** Returns the on-disk size of header + data part + checksum. */ @@ -479,15 +484,15 @@ long getPrevBlockOffset() { * side-effect. */ private void overwriteHeader() { - buf.rewind(); - blockType.write(buf); - buf.putInt(onDiskSizeWithoutHeader); - buf.putInt(uncompressedSizeWithoutHeader); - buf.putLong(prevBlockOffset); + bufWithoutChecksum.rewind(); + blockType.write(bufWithoutChecksum); + bufWithoutChecksum.putInt(onDiskSizeWithoutHeader); + bufWithoutChecksum.putInt(uncompressedSizeWithoutHeader); + bufWithoutChecksum.putLong(prevBlockOffset); if (this.fileContext.isUseHBaseChecksum()) { - buf.put(fileContext.getChecksumType().getCode()); - buf.putInt(fileContext.getBytesPerChecksum()); - buf.putInt(onDiskDataSizeWithHeader); + bufWithoutChecksum.put(fileContext.getChecksumType().getCode()); + bufWithoutChecksum.putInt(fileContext.getBytesPerChecksum()); + bufWithoutChecksum.putInt(onDiskDataSizeWithHeader); } } @@ -507,11 +512,12 @@ public ByteBuff getBufferWithoutHeader() { * in {@link CompoundBloomFilter} to avoid object creation on every Bloom filter lookup, but has * to be used with caution. Buffer holds header, block content, and any follow-on checksums if * present. - * @return the buffer of this block for read-only operations + * @return the buffer of this block for read-only operations,the buffer includes header,but not + * checksum. */ public ByteBuff getBufferReadOnly() { // TODO: ByteBuf does not support asReadOnlyBuffer(). Fix. 
- ByteBuff dup = this.buf.duplicate(); + ByteBuff dup = this.bufWithoutChecksum.duplicate(); assert dup.position() == 0; return dup; } @@ -545,7 +551,7 @@ private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromFie */ void sanityCheck() throws IOException { // Duplicate so no side-effects - ByteBuff dup = this.buf.duplicate().rewind(); + ByteBuff dup = this.bufWithoutChecksum.duplicate().rewind(); sanityCheckAssertion(BlockType.read(dup), blockType); sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader"); @@ -588,8 +594,8 @@ public String toString() { .append(", prevBlockOffset=").append(prevBlockOffset).append(", isUseHBaseChecksum=") .append(fileContext.isUseHBaseChecksum()); if (fileContext.isUseHBaseChecksum()) { - sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) - .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) + sb.append(", checksumType=").append(ChecksumType.codeToType(this.bufWithoutChecksum.get(24))) + .append(", bytesPerChecksum=").append(this.bufWithoutChecksum.getInt(24 + 1)) .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); } else { sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader).append("(") @@ -597,9 +603,10 @@ public String toString() { .append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); } String dataBegin; - if (buf.hasArray()) { - dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(), - Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); + if (bufWithoutChecksum.hasArray()) { + dataBegin = Bytes.toStringBinary(bufWithoutChecksum.array(), + bufWithoutChecksum.arrayOffset() + headerSize(), + Math.min(32, bufWithoutChecksum.limit() - bufWithoutChecksum.arrayOffset() - headerSize())); } else { ByteBuff bufWithoutHeader = getBufferWithoutHeader(); byte[] dataBeginBytes = @@ -609,8 +616,8 @@ public String toString() { } sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()) .append(", totalChecksumBytes=").append(totalChecksumBytes()).append(", isUnpacked=") - .append(isUnpacked()).append(", buf=[").append(buf).append("]").append(", dataBeginsWith=") - .append(dataBegin).append(", fileContext=").append(fileContext) + .append(isUnpacked()).append(", buf=[").append(bufWithoutChecksum).append("]") + .append(", dataBeginsWith=").append(dataBegin).append(", fileContext=").append(fileContext) .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize).append("]"); return sb.toString(); } @@ -639,7 +646,7 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException : reader.getDefaultBlockDecodingContext(); // Create a duplicated buffer without the header part. int headerSize = this.headerSize(); - ByteBuff dup = this.buf.duplicate(); + ByteBuff dup = this.bufWithoutChecksum.duplicate(); dup.position(headerSize); dup = dup.slice(); // Decode the dup into unpacked#buf @@ -662,7 +669,7 @@ private ByteBuff allocateBufferForUnpacking() { int headerSize = headerSize(); int capacityNeeded = headerSize + uncompressedSizeWithoutHeader; - ByteBuff source = buf.duplicate(); + ByteBuff source = bufWithoutChecksum.duplicate(); ByteBuff newBuf = allocator.allocate(capacityNeeded); // Copy header bytes into newBuf. 
@@ -681,7 +688,7 @@ private ByteBuff allocateBufferForUnpacking() { public boolean isUnpacked() { final int headerSize = headerSize(); final int expectedCapacity = headerSize + uncompressedSizeWithoutHeader; - final int bufCapacity = buf.remaining(); + final int bufCapacity = bufWithoutChecksum.remaining(); return bufCapacity == expectedCapacity || bufCapacity == expectedCapacity + headerSize; } @@ -697,9 +704,9 @@ long getOffset() { return offset; } - /** Returns a byte stream reading the data + checksum of this block */ + /** Returns a byte stream reading the data(excluding header and checksum) of this block */ DataInputStream getByteStream() { - ByteBuff dup = this.buf.duplicate(); + ByteBuff dup = this.bufWithoutChecksum.duplicate(); dup.position(this.headerSize()); return new DataInputStream(new ByteBuffInputStream(dup)); } @@ -708,9 +715,9 @@ DataInputStream getByteStream() { public long heapSize() { long size = FIXED_OVERHEAD; size += fileContext.heapSize(); - if (buf != null) { + if (bufWithoutChecksum != null) { // Deep overhead of the byte buffer. Needs to be aligned separately. - size += ClassSize.align(buf.capacity() + MULTI_BYTE_BUFFER_HEAP_SIZE); + size += ClassSize.align(bufWithoutChecksum.capacity() + MULTI_BYTE_BUFFER_HEAP_SIZE); } return ClassSize.align(size); } @@ -1861,9 +1868,9 @@ void sanityCheckUncompressed() throws IOException { // Cacheable implementation @Override public int getSerializedLength() { - if (buf != null) { + if (bufWithoutChecksum != null) { // Include extra bytes for block metadata. - return this.buf.limit() + BLOCK_METADATA_SPACE; + return this.bufWithoutChecksum.limit() + BLOCK_METADATA_SPACE; } return 0; } @@ -1871,7 +1878,7 @@ public int getSerializedLength() { // Cacheable implementation @Override public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) { - this.buf.get(destination, 0, getSerializedLength() - BLOCK_METADATA_SPACE); + this.bufWithoutChecksum.get(destination, 0, getSerializedLength() - BLOCK_METADATA_SPACE); destination = addMetaData(destination, includeNextBlockMetadata); // Make it ready for reading. flip sets position to zero and limit to current position which @@ -1917,7 +1924,7 @@ public int hashCode() { result = result * 31 + onDiskSizeWithoutHeader; result = result * 31 + (int) (prevBlockOffset ^ (prevBlockOffset >>> 32)); result = result * 31 + uncompressedSizeWithoutHeader; - result = result * 31 + buf.hashCode(); + result = result * 31 + bufWithoutChecksum.hashCode(); return result; } @@ -1955,8 +1962,8 @@ public boolean equals(Object comparison) { return false; } if ( - ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, - castedComparison.buf.limit()) != 0 + ByteBuff.compareTo(this.bufWithoutChecksum, 0, this.bufWithoutChecksum.limit(), + castedComparison.bufWithoutChecksum, 0, castedComparison.bufWithoutChecksum.limit()) != 0 ) { return false; } @@ -1984,10 +1991,17 @@ int getOnDiskDataSizeWithHeader() { } /** - * Calculate the number of bytes required to store all the checksums for this block. Each checksum - * value is a 4 byte integer. + * Return the number of bytes required to store all the checksums for this block. Each checksum + * value is a 4 byte integer.
+ * NOTE: ByteBuff returned by {@link HFileBlock#getBufferWithoutHeader()} and + * {@link HFileBlock#getBufferReadOnly} or DataInputStream returned by + * {@link HFileBlock#getByteStream()} does not include checksum. */ int totalChecksumBytes() { + return totalChecksumBytes; + } + + private int computeTotalChecksumBytes() { // If the hfile block has minorVersion 0, then there are no checksum // data to validate. Similarly, a zero value in this.bytesPerChecksum // indicates that cached blocks do not have checksum data because @@ -2084,7 +2098,8 @@ private static HFileBlock shallowClone(HFileBlock blk, ByteBuff newBuf) { } static HFileBlock deepCloneOnHeap(HFileBlock blk) { - ByteBuff deepCloned = ByteBuff.wrap(ByteBuffer.wrap(blk.buf.toBytes(0, blk.buf.limit()))); + ByteBuff deepCloned = ByteBuff + .wrap(ByteBuffer.wrap(blk.bufWithoutChecksum.toBytes(0, blk.bufWithoutChecksum.limit()))); return createBuilder(blk, deepCloned).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index b5a5095c3367..12ef197af439 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -883,10 +883,10 @@ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throw */ public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = readRootIndex(blk, numEntries); - // after reading the root index the checksum bytes have to - // be subtracted to know if the mid key exists. - int checkSumBytes = blk.totalChecksumBytes(); - if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) { + // HFileBlock.getByteStream() returns a byte stream for reading the data(excluding checksum) + // of root index block, so after reading the root index there is no need to subtract the + // checksum bytes. + if (in.available() < MID_KEY_METADATA_SIZE) { // No mid-key metadata available. return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java index 9e480247ee9a..3115a5153c21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java @@ -204,10 +204,10 @@ public void initRootIndex(HFileBlock blk, int numEntries, CellComparator compara private void init(HFileBlock blk, int numEntries) throws IOException { DataInputStream in = readRootIndex(blk, numEntries); - // after reading the root index the checksum bytes have to - // be subtracted to know if the mid key exists. - int checkSumBytes = blk.totalChecksumBytes(); - if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) { + // HFileBlock.getByteStream() returns a byte stream for reading the data(excluding checksum) + // of root index block, so after reading the root index there is no need to subtract the + // checksum bytes. + if (in.available() < MID_KEY_METADATA_SIZE) { // No mid-key metadata available. 
return; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 5b8cfadfde78..1c158ccdf3b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; +import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -50,10 +51,14 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ByteBuffAllocator; +import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; +import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; +import org.apache.hadoop.hbase.io.hfile.NoOpIndexBlockEncoder.NoOpEncodedSeeker; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.MultiByteBuff; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -740,4 +745,96 @@ public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws } reader.close(); } + + /** + * This test is for HBASE-27940, which midkey metadata in root index block would always be ignored + * by {@link BlockIndexReader#readMultiLevelIndexRoot}. + */ + @Test + public void testMidKeyReadSuccessfullyFromRootIndexBlock() throws IOException { + conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, 128); + Path hfilePath = + new Path(TEST_UTIL.getDataTestDir(), "testMidKeyReadSuccessfullyFromRootIndexBlock"); + Compression.Algorithm compressAlgo = Compression.Algorithm.NONE; + int entryCount = 50000; + HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(false) + .withDataBlockEncoding(DataBlockEncoding.NONE).withCompression(compressAlgo).build(); + + try (HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf)) + .withPath(fs, hfilePath).withFileContext(context).create()) { + + List keyValues = new ArrayList<>(entryCount); + for (int i = 0; i < entryCount; ++i) { + byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(RNG, i); + // A random-length random value. 
+ byte[] valueBytes = RandomKeyValueUtil.randomValue(RNG); + KeyValue keyValue = + new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes); + writer.append(keyValue); + keyValues.add(keyValue); + } + } + + try (FSDataInputStream fsdis = fs.open(hfilePath)) { + long fileSize = fs.getFileStatus(hfilePath).getLen(); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize); + + assertEquals(3, trailer.getMajorVersion()); + assertEquals(entryCount, trailer.getEntryCount()); + HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo) + .withIncludesMvcc(false).withIncludesTags(false) + .withDataBlockEncoding(DataBlockEncoding.NONE).withHBaseCheckSum(true).build(); + ReaderContext readerContext = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(fsdis)) + .withFilePath(hfilePath).withFileSystem(fs).withFileSize(fileSize).build(); + HFileBlock.FSReader blockReader = + new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); + + MyEncoder encoder = new MyEncoder(); + HFileBlockIndex.CellBasedKeyBlockIndexReaderV2 dataBlockIndexReader = + new HFileBlockIndex.CellBasedKeyBlockIndexReaderV2(trailer.createComparator(), + trailer.getNumDataIndexLevels(), encoder); + + HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), + fileSize - trailer.getTrailerSize()); + // Data index. We also read statistics about the block index written after + // the root level. + dataBlockIndexReader.readMultiLevelIndexRoot( + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + NoOpEncodedSeeker noOpEncodedSeeker = (NoOpEncodedSeeker) encoder.encoderSeeker; + // Assert we have read midkey metadata successfully. 
+ assertTrue(noOpEncodedSeeker.midLeafBlockOffset >= 0); + assertTrue(noOpEncodedSeeker.midLeafBlockOnDiskSize > 0); + assertTrue(noOpEncodedSeeker.midKeyEntry >= 0); + } + } + + static class MyEncoder implements HFileIndexBlockEncoder { + + EncodedSeeker encoderSeeker; + + @Override + public void saveMetadata(Writer writer) throws IOException { + NoOpIndexBlockEncoder.INSTANCE.saveMetadata(writer); + + } + + @Override + public void encode(BlockIndexChunk blockIndexChunk, boolean rootIndexBlock, DataOutput out) + throws IOException { + NoOpIndexBlockEncoder.INSTANCE.encode(blockIndexChunk, rootIndexBlock, out); + } + + @Override + public IndexBlockEncoding getIndexBlockEncoding() { + return NoOpIndexBlockEncoder.INSTANCE.getIndexBlockEncoding(); + } + + @Override + public EncodedSeeker createSeeker() { + encoderSeeker = NoOpIndexBlockEncoder.INSTANCE.createSeeker(); + return encoderSeeker; + } + + } } From cf02edbb1cea37a9a1cc4a64ff50f77a86955e5c Mon Sep 17 00:00:00 2001 From: Jing Yu Date: Tue, 20 Jun 2023 17:52:16 -0400 Subject: [PATCH 012/514] HBASE-27902 Utility to invoke coproc on multiple servers using AsyncAdmin (#5266) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hbase/client/AsyncAdminClientUtils.java | 86 +++++++++ ...CoprocessorOnAllRegionServersEndpoint.java | 170 ++++++++++++++++++ 2 files changed, 256 insertions(+) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminClientUtils.java create mode 100644 hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorOnAllRegionServersEndpoint.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminClientUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminClientUtils.java new file mode 100644 index 000000000000..b7bce4d2a4e4 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminClientUtils.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; + +/** + * Additional Asynchronous Admin capabilities for clients. + */ +@InterfaceAudience.Public +public final class AsyncAdminClientUtils { + + private AsyncAdminClientUtils() { + } + + /** + * Execute the given coprocessor call on all region servers. + *
+ * The {@code stubMaker} is just a delegation to the {@code newStub} call. Usually it is only a + * one line lambda expression, like: + * + *
+   * channel -> xxxService.newStub(channel)
+ * + * @param asyncAdmin the asynchronous administrative API for HBase. + * @param stubMaker a delegation to the actual {@code newStub} call. + * @param callable a delegation to the actual protobuf rpc call. See the comment of + * {@link ServiceCaller} for more details. + * @param the type of the asynchronous stub + * @param the type of the return value + * @return Map of each region server to its result of the protobuf rpc call, wrapped by a + * {@link CompletableFuture}. + * @see ServiceCaller + */ + public static CompletableFuture> + coprocessorServiceOnAllRegionServers(AsyncAdmin asyncAdmin, Function stubMaker, + ServiceCaller callable) { + CompletableFuture> future = new CompletableFuture<>(); + FutureUtils.addListener(asyncAdmin.getRegionServers(), (regionServers, error) -> { + if (error != null) { + future.completeExceptionally(error); + return; + } + Map resultMap = new ConcurrentHashMap<>(); + for (ServerName regionServer : regionServers) { + FutureUtils.addListener(asyncAdmin.coprocessorService(stubMaker, callable, regionServer), + (server, err) -> { + if (err != null) { + resultMap.put(regionServer, err); + } else { + resultMap.put(regionServer, server); + } + if (resultMap.size() == regionServers.size()) { + future.complete(Collections.unmodifiableMap(resultMap)); + } + }); + } + }); + return future; + } +} diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorOnAllRegionServersEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorOnAllRegionServersEndpoint.java new file mode 100644 index 000000000000..9dc3b9c75f1e --- /dev/null +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorOnAllRegionServersEndpoint.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.coprocessor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.FileNotFoundException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AsyncAdminClientUtils; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.client.ServiceCaller; +import org.apache.hadoop.hbase.client.TestAsyncAdminBase; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.Service; + +import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest; +import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyResponse; +import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService; + +@RunWith(Parameterized.class) +@Category({ ClientTests.class, MediumTests.class }) +public class TestAsyncCoprocessorOnAllRegionServersEndpoint extends TestAsyncAdminBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAsyncCoprocessorOnAllRegionServersEndpoint.class); + + private static final String THROW_CLASS_NAME = "java.io.FileNotFoundException"; + private static final String DUMMY_VALUE = "val"; + private static final int NUM_SLAVES = 5; + private static final int NUM_SUCCESS_REGION_SERVERS = 3; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + ProtobufCoprocessorService.class.getName()); + TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + DummyRegionServerEndpoint.class.getName()); + TEST_UTIL.startMiniCluster(NUM_SLAVES); + ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testRegionServersCoprocessorService() + throws ExecutionException, InterruptedException { + DummyRequest request = DummyRequest.getDefaultInstance(); + Map resultMap = AsyncAdminClientUtils.coprocessorServiceOnAllRegionServers(admin, + DummyService::newStub, (ServiceCaller) (stub, controller, + rpcCallback) -> stub.dummyCall(controller, request, 
rpcCallback)) + .get(); + + resultMap.forEach((k, v) -> { + assertTrue(v instanceof DummyResponse); + DummyResponse resp = (DummyResponse) v; + assertEquals(DUMMY_VALUE, resp.getValue()); + }); + } + + @Test + public void testRegionServerCoprocessorsServiceAllFail() + throws ExecutionException, InterruptedException { + DummyRequest request = DummyRequest.getDefaultInstance(); + Map resultMap = AsyncAdminClientUtils.coprocessorServiceOnAllRegionServers(admin, + DummyService::newStub, (ServiceCaller) (stub, controller, + rpcCallback) -> stub.dummyThrow(controller, request, rpcCallback)) + .get(); + + resultMap.forEach((k, v) -> { + assertTrue(v instanceof RetriesExhaustedException); + Throwable e = (Throwable) v; + assertTrue(e.getMessage().contains(THROW_CLASS_NAME)); + }); + } + + @Test + public void testRegionServerCoprocessorsServicePartialFail() + throws ExecutionException, InterruptedException { + DummyRequest request = DummyRequest.getDefaultInstance(); + AtomicInteger callCount = new AtomicInteger(); + Map resultMap = + AsyncAdminClientUtils.coprocessorServiceOnAllRegionServers(admin, DummyService::newStub, + (ServiceCaller) (stub, controller, rpcCallback) -> { + callCount.addAndGet(1); + if (callCount.get() <= NUM_SUCCESS_REGION_SERVERS) { + stub.dummyCall(controller, request, rpcCallback); + } else { + stub.dummyThrow(controller, request, rpcCallback); + } + }).get(); + + AtomicInteger successCallCount = new AtomicInteger(); + resultMap.forEach((k, v) -> { + if (v instanceof DummyResponse) { + successCallCount.addAndGet(1); + DummyResponse resp = (DummyResponse) v; + assertEquals(DUMMY_VALUE, resp.getValue()); + } else { + assertTrue(v instanceof RetriesExhaustedException); + Throwable e = (Throwable) v; + assertTrue(e.getMessage().contains(THROW_CLASS_NAME)); + } + }); + assertEquals(NUM_SUCCESS_REGION_SERVERS, successCallCount.get()); + } + + public static class DummyRegionServerEndpoint extends DummyService + implements RegionServerCoprocessor { + @Override + public Iterable getServices() { + return Collections.singleton(this); + } + + @Override + public void dummyCall(RpcController controller, DummyRequest request, + RpcCallback callback) { + callback.run(DummyResponse.newBuilder().setValue(DUMMY_VALUE).build()); + } + + @Override + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback done) { + CoprocessorRpcUtils.setControllerException(controller, + new FileNotFoundException("/file.txt")); + } + } +} From da171c341eab8adebab756743243d94477a9074f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 22 Jun 2023 21:33:33 +0800 Subject: [PATCH 013/514] HBASE-27936 NPE in StoreFileReader.passesGeneralRowPrefixBloomFilter() (#5300) Need to also copy bloomFilterMetrics in StoreFileReader.copyFields Signed-off-by: Viraj Jasani --- .../hbase/regionserver/StoreFileReader.java | 14 +- .../hbase/regionserver/StoreFileWriter.java | 3 +- .../hbase/regionserver/RegionAsTable.java | 47 ++-- .../regionserver/TestBloomFilterFaulty.java | 200 ++++++++++++++++++ 4 files changed, 235 insertions(+), 29 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBloomFilterFaulty.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index dc8b06200aec..72e93c3f75a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.DELETE_FAMILY_COUNT; import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY; +import com.google.errorprone.annotations.RestrictedApi; import java.io.DataInput; import java.io.IOException; import java.util.Map; @@ -103,6 +104,7 @@ void copyFields(StoreFileReader storeFileReader) throws IOException { this.generalBloomFilter = storeFileReader.generalBloomFilter; this.deleteFamilyBloomFilter = storeFileReader.deleteFamilyBloomFilter; this.bloomFilterType = storeFileReader.bloomFilterType; + this.bloomFilterMetrics = storeFileReader.bloomFilterMetrics; this.sequenceID = storeFileReader.sequenceID; this.timeRange = storeFileReader.timeRange; this.lastBloomKey = storeFileReader.lastBloomKey; @@ -496,7 +498,9 @@ public Map loadFileInfo() throws IOException { return fi; } - public void loadBloomfilter() { + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + void loadBloomfilter() { this.loadBloomfilter(BlockType.GENERAL_BLOOM_META, null); this.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META, null); } @@ -546,7 +550,9 @@ public void loadBloomfilter(BlockType blockType, BloomFilterMetrics metrics) { } } - private void setBloomFilterFaulty(BlockType blockType) { + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/StoreFileReader.java|.*/src/test/.*") + void setBloomFilterFaulty(BlockType blockType) { if (blockType == BlockType.GENERAL_BLOOM_META) { setGeneralBloomFilterFaulty(); } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { @@ -563,11 +569,11 @@ public long getFilterEntries() { return generalBloomFilter != null ? 
generalBloomFilter.getKeyCount() : reader.getEntries(); } - public void setGeneralBloomFilterFaulty() { + private void setGeneralBloomFilterFaulty() { generalBloomFilter = null; } - public void setDeleteFamilyBloomFilterFaulty() { + private void setDeleteFamilyBloomFilterFaulty() { this.deleteFamilyBloomFilter = null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index b76867d1c223..17e0001fb0cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -154,8 +154,7 @@ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, Cach this.bloomType = BloomType.NONE; } - // initialize delete family Bloom filter when there is NO RowCol Bloom - // filter + // initialize delete family Bloom filter when there is NO RowCol Bloom filter if (this.bloomType != BloomType.ROWCOL) { this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java index 951f2e8d53f0..e0d4981a7f11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -129,39 +128,41 @@ public Result[] get(List gets) throws IOException { } static class RegionScannerToResultScannerAdaptor implements ResultScanner { - private static final Result[] EMPTY_RESULT_ARRAY = new Result[0]; - private final RegionScanner regionScanner; - RegionScannerToResultScannerAdaptor(final RegionScanner regionScanner) { - this.regionScanner = regionScanner; - } + private final RegionScanner scanner; - @Override - public Iterator iterator() { - throw new UnsupportedOperationException(); - } + private boolean moreRows = true; - @Override - public Result next() throws IOException { - List cells = new ArrayList<>(); - return regionScanner.next(cells) ? 
Result.create(cells) : null; + private final List cells = new ArrayList<>(); + + RegionScannerToResultScannerAdaptor(final RegionScanner scanner) { + this.scanner = scanner; } @Override - public Result[] next(int nbRows) throws IOException { - List results = new ArrayList<>(nbRows); - for (int i = 0; i < nbRows; i++) { - Result result = next(); - if (result == null) break; - results.add(result); + public Result next() throws IOException { + if (!moreRows) { + return null; + } + for (;;) { + moreRows = scanner.next(cells); + if (cells.isEmpty()) { + if (!moreRows) { + return null; + } else { + continue; + } + } + Result result = Result.create(cells); + cells.clear(); + return result; } - return results.toArray(EMPTY_RESULT_ARRAY); } @Override public void close() { try { - regionScanner.close(); + scanner.close(); } catch (IOException e) { throw new RuntimeException(e); } @@ -174,7 +175,7 @@ public boolean renewLease() { @Override public ScanMetrics getScanMetrics() { - throw new UnsupportedOperationException(); + return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBloomFilterFaulty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBloomFilterFaulty.java new file mode 100644 index 000000000000..3aac6b0f10f2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBloomFilterFaulty.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Scan.ReadType; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * A UT to make sure that everything is fine when we fail to load bloom filter. + *
+ * See HBASE-27936 for more details. + */ +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestBloomFilterFaulty { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBloomFilterFaulty.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static final byte[] FAMILY = Bytes.toBytes("family"); + + private static final byte[] QUAL = Bytes.toBytes("qualifier"); + + private static final TableDescriptor TD = + TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) + .setBloomFilterType(BloomType.ROWPREFIX_FIXED_LENGTH) + .setConfiguration("RowPrefixBloomFilter.prefix_length", "2").build()) + .build(); + + private static final RegionInfo RI = RegionInfoBuilder.newBuilder(TD.getTableName()).build(); + + @AfterClass + public static void tearDownAfterClass() { + UTIL.cleanupTestDir(); + } + + private HRegion region; + + @Rule + public final TestName name = new TestName(); + + private void generateHFiles() throws IOException { + for (int i = 0; i < 4; i++) { + long ts = EnvironmentEdgeManager.currentTime(); + for (int j = 0; j < 5; j++) { + byte[] row = Bytes.toBytes(j); + region.put(new Put(row).addColumn(FAMILY, QUAL, ts, Bytes.toBytes(i * 10 + j))); + region.delete(new Delete(row).addFamilyVersion(FAMILY, ts)); + } + + for (int j = 5; j < 10; j++) { + byte[] row = Bytes.toBytes(j); + region.put(new Put(row).addColumn(FAMILY, QUAL, ts + 1, Bytes.toBytes(i * 10 + j))); + } + + FlushResult result = region.flush(true); + if ( + result.getResult() == FlushResult.Result.CANNOT_FLUSH + || result.getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY + ) { + throw new IOException("Can not flush region, flush result: " + result); + } + } + } + + @Before + public void setUp() throws IOException { + Path rootDir = UTIL.getDataTestDir(name.getMethodName()); + // generate some hfiles so we can have StoreFileReader which has bloomfilters + region = HBaseTestingUtil.createRegionAndWAL(RI, rootDir, UTIL.getConfiguration(), TD); + generateHFiles(); + HStore store = region.getStore(FAMILY); + for (HStoreFile storefile : store.getStorefiles()) { + storefile.initReader(); + StoreFileReader reader = storefile.getReader(); + // make sure we load bloom filters correctly + assertNotNull(reader.generalBloomFilter); + assertNotNull(reader.deleteFamilyBloomFilter); + } + } + + @After + public void tearDown() throws IOException { + if (region != null) { + HBaseTestingUtil.closeRegionAndWAL(region); + } + } + + private void setFaulty(BlockType type) { + HStore store = region.getStore(FAMILY); + for (HStoreFile storefile : store.getStorefiles()) { + storefile.getReader().setBloomFilterFaulty(type); + } + } + + private void testGet() throws IOException { + for (int i = 0; i < 5; i++) { + assertTrue(region.get(new Get(Bytes.toBytes(i))).isEmpty()); + } + for (int i = 5; i < 10; i++) { + assertEquals(30 + i, + Bytes.toInt(region.get(new Get(Bytes.toBytes(i))).getValue(FAMILY, QUAL))); + } + } + + private void testStreamScan() throws IOException { + try (RegionAsTable table = new RegionAsTable(region); + ResultScanner scanner = table.getScanner(new Scan().setReadType(ReadType.STREAM))) { + for (int i = 5; i < 10; i++) { + Result result = scanner.next(); + assertEquals(i, Bytes.toInt(result.getRow())); + assertEquals(30 + i, Bytes.toInt(result.getValue(FAMILY, QUAL))); + } + assertNull(scanner.next()); + } + } + + private void testRegion() 
throws IOException { + // normal read + testGet(); + // scan with stream reader + testStreamScan(); + // major compact + region.compact(true); + // test read and scan again + testGet(); + testStreamScan(); + } + + @Test + public void testNoGeneralBloomFilter() throws IOException { + setFaulty(BlockType.GENERAL_BLOOM_META); + testRegion(); + } + + @Test + public void testNoDeleteFamilyBloomFilter() throws IOException { + setFaulty(BlockType.DELETE_FAMILY_BLOOM_META); + testRegion(); + } + + @Test + public void testNoAnyBloomFilter() throws IOException { + setFaulty(BlockType.GENERAL_BLOOM_META); + setFaulty(BlockType.DELETE_FAMILY_BLOOM_META); + testRegion(); + } +} From dbf3da197b13c00d681164659504b259c9d61279 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 23 Jun 2023 10:06:04 +0800 Subject: [PATCH 014/514] HBASE-27782 During SSL handshake error, netty complains that exceptionCaught() was not handled (#5305) Signed-off-by: Bryan Beaudreault --- .../ipc/BufferCallBeforeInitHandler.java | 39 ++++- .../hbase/ipc/TestTLSHandshadeFailure.java | 163 ++++++++++++++++++ 2 files changed, 200 insertions(+), 2 deletions(-) create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java index 6f8339895b5b..5bad42cd630b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java @@ -20,7 +20,10 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import javax.net.ssl.SSLException; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -33,6 +36,8 @@ @InterfaceAudience.Private class BufferCallBeforeInitHandler extends ChannelDuplexHandler { + private static final Logger LOG = LoggerFactory.getLogger(BufferCallBeforeInitHandler.class); + static final String NAME = "BufferCall"; private enum BufferCallAction { @@ -93,15 +98,18 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc for (Call call : id2Call.values()) { ctx.write(call); } + ctx.flush(); + ctx.pipeline().remove(this); break; case FAIL: for (Call call : id2Call.values()) { call.setException(bcEvt.error); } + // here we do not remove us from the pipeline, for receiving possible exceptions and log + // it, especially the ssl exceptions, to prevent it reaching the tail of the pipeline and + // generate a confusing netty WARN break; } - ctx.flush(); - ctx.pipeline().remove(this); } else if (evt instanceof CallEvent) { // just remove the call for now until we add other call event other than timeout and cancel. 
id2Call.remove(((CallEvent) evt).call.id); @@ -109,4 +117,31 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc ctx.fireUserEventTriggered(evt); } } + + private boolean isSslError(Throwable cause) { + Throwable error = cause; + do { + if (error instanceof SSLException) { + return true; + } + error = error.getCause(); + } while (error != null); + return false; + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + if (isSslError(cause)) { + // this should have been logged in other places, see HBASE-27782 for more details. + // here we just log it with debug and tell users that this is not a critical problem, + // otherwise if we just pass it through the pipeline, it will lead to a confusing + // "An exceptionCaught() event was fired, and it reached at the tail of the pipeline" + LOG.debug( + "got ssl exception, which should have already been proceeded, log it here to" + + " prevent it being passed to netty's TailContext and trigger a confusing WARN message", + cause); + } else { + ctx.fireExceptionCaught(cause); + } + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java new file mode 100644 index 000000000000..7375388e4a04 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.MetricsConnection.CallStats; +import org.apache.hadoop.hbase.io.crypto.tls.X509Util; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.NotSslRecordException; + +/** + * A simple UT to make sure that we do not leak the SslExceptions to netty's TailContext, where it + * will generate a confusing WARN message. + *
+ * See HBASE-27782 for more details. + */ +@Category({ ClientTests.class, SmallTests.class }) +public class TestTLSHandshadeFailure { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTLSHandshadeFailure.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestTLSHandshadeFailure.class); + + private final Configuration conf = HBaseConfiguration.create(); + + // use a pre set seed to make the random bytes stable + private final Random rand = new Random(1); + + private ServerSocket server; + + private Thread serverThread; + + private NettyRpcClient client; + + private org.apache.logging.log4j.core.Appender mockAppender; + + private void serve() { + Socket socket = null; + try { + socket = server.accept(); + byte[] bytes = new byte[128]; + rand.nextBytes(bytes); + socket.getOutputStream().write(bytes); + socket.getOutputStream().flush(); + } catch (Exception e) { + LOG.warn("failed to process request", e); + } finally { + if (socket != null) { + try { + socket.close(); + } catch (IOException e1) { + LOG.warn("failed to close socket"); + } + } + } + } + + @Before + public void setUp() throws IOException { + server = new ServerSocket(0); + serverThread = new Thread(this::serve); + serverThread.setDaemon(true); + serverThread.setName("Error-Server-Thread"); + serverThread.start(); + conf.setBoolean(X509Util.HBASE_CLIENT_NETTY_TLS_ENABLED, true); + client = new NettyRpcClient(conf); + + mockAppender = mock(org.apache.logging.log4j.core.Appender.class); + when(mockAppender.getName()).thenReturn("mockAppender"); + when(mockAppender.isStarted()).thenReturn(true); + ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager + .getLogger(BufferCallBeforeInitHandler.class)).addAppender(mockAppender); + } + + @After + public void tearDown() throws IOException { + ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager + .getLogger(BufferCallBeforeInitHandler.class)).removeAppender(mockAppender); + Closeables.close(client, true); + Closeables.close(server, true); + } + + @Test + public void test() throws Exception { + AtomicReference level = new AtomicReference<>(); + AtomicReference msg = new AtomicReference(); + doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + org.apache.logging.log4j.core.LogEvent logEvent = + invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); + level.set(logEvent.getLevel()); + msg.set(logEvent.getMessage().getFormattedMessage()); + return null; + } + }).when(mockAppender).append(any()); + ConnectionId id = new ConnectionId(User.getCurrent(), "test", + Address.fromParts("127.0.0.1", server.getLocalPort())); + NettyRpcConnection conn = client.createConnection(id); + BlockingRpcCallback done = new BlockingRpcCallback<>(); + Call call = new Call(1, null, null, null, null, 0, 0, done, new CallStats()); + HBaseRpcController hrc = new HBaseRpcControllerImpl(); + conn.sendRequest(call, hrc); + done.get(); + assertThat(call.error, instanceOf(NotSslRecordException.class)); + Waiter.waitFor(conf, 5000, () -> msg.get() != null); + verify(mockAppender).append(any()); + // make sure that it has been logged by BufferCallBeforeInitHandler + assertEquals(org.apache.logging.log4j.Level.DEBUG, level.get()); + assertThat(msg.get(), + startsWith("got ssl exception, which should have already been proceeded")); + } +} From 66fd6db9c536010b7df09c8beb1fce15ac05b19e Mon Sep 17 00:00:00 2001 From: Hernan Romer Date: Fri, 
23 Jun 2023 09:47:44 -0400 Subject: [PATCH 015/514] HBASE-27950 ClientSideRegionScanner does not adhere to RegionScanner.nextRaw contract (#5304) Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../hbase/client/ClientSideRegionScanner.java | 14 +-- .../client/TestClientSideRegionScanner.java | 88 +++++++++++++++++++ 2 files changed, 96 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 4a8dd1d3ac86..191910441409 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -48,6 +48,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { private HRegion region; RegionScanner scanner; List values; + boolean hasMore = true; public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) throws IOException { @@ -90,12 +91,13 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, @Override public Result next() throws IOException { - values.clear(); - scanner.nextRaw(values); - if (values.isEmpty()) { - // we are done - return null; - } + do { + if (!hasMore) { + return null; + } + values.clear(); + this.hasMore = scanner.nextRaw(values); + } while (values.isEmpty()); Result result = Result.create(values); if (this.scanMetrics != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java index 32ca3dde8c0b..6da74bf031de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java @@ -17,22 +17,34 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import java.io.IOException; +import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache; +import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -47,6 +59,8 @@ public class TestClientSideRegionScanner { 
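// Illustrative sketch, not part of the patch above: the RegionScanner.nextRaw contract that
// the ClientSideRegionScanner fix relies on. A single nextRaw call may legitimately return an
// empty cell list (for example when a filter discards the current row and the scan bails out
// early), while the boolean return value is the only signal that the region is exhausted, so
// callers must keep polling until cells arrive or nextRaw reports no more rows. Signatures
// follow the code in the patch; the helper class and method names are assumptions.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

final class NextRawContractSketch {
  // Drains one region while honoring the contract; returns the number of cells seen.
  static long drain(RegionScanner scanner) throws IOException {
    List<Cell> cells = new ArrayList<>();
    boolean moreRows = true;
    long cellCount = 0;
    while (moreRows) {
      cells.clear();
      moreRows = scanner.nextRaw(cells); // false => no more rows left in this region
      if (!cells.isEmpty()) {
        cellCount += cells.size();       // a real caller would wrap these in Result.create(cells)
      }
    }
    return cellCount;
  }
}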
HBaseClassTestRule.forClass(TestClientSideRegionScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final TableName TABLE_NAME = TableName.valueOf("test"); + private static final byte[] FAM_NAME = Bytes.toBytes("f"); private Configuration conf; private Path rootDir; @@ -113,4 +127,78 @@ public void testNoBlockCache() throws IOException { BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); assertNull(blockCache); } + + @Test + public void testContinuesToScanIfHasMore() throws IOException { + // Conditions for this test to set up RegionScannerImpl to bail on the scan + // after a single iteration + // 1. Configure preadMaxBytes to something small to trigger scannerContext#returnImmediately + // 2. Configure a filter to filter out some rows, in this case rows with values < 5 + // 3. Configure the filter's hasFilterRow to return true so RegionScannerImpl sets + // the limitScope to something with a depth of 0, so we bail on the scan after the first + // iteration + + Configuration copyConf = new Configuration(conf); + copyConf.setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 1); + Scan scan = new Scan(); + scan.setFilter(new FiltersRowsLessThan5()); + scan.setLimit(1); + + try (Table table = TEST_UTIL.createTable(TABLE_NAME, FAM_NAME)) { + TableDescriptor htd = TEST_UTIL.getAdmin().getDescriptor(TABLE_NAME); + RegionInfo hri = TEST_UTIL.getAdmin().getRegions(TABLE_NAME).get(0); + + for (int i = 0; i < 10; ++i) { + table.put(createPut(i)); + } + + // Flush contents to disk so we can scan the fs + TEST_UTIL.getAdmin().flush(TABLE_NAME); + + ClientSideRegionScanner clientSideRegionScanner = + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); + RegionScanner scannerSpy = spy(clientSideRegionScanner.scanner); + clientSideRegionScanner.scanner = scannerSpy; + Result result = clientSideRegionScanner.next(); + + verify(scannerSpy, times(6)).nextRaw(anyList()); + assertNotNull(result); + assertEquals(Bytes.toInt(result.getRow()), 5); + assertTrue(clientSideRegionScanner.hasMore); + + for (int i = 6; i < 10; ++i) { + result = clientSideRegionScanner.next(); + verify(scannerSpy, times(i + 1)).nextRaw(anyList()); + assertNotNull(result); + assertEquals(Bytes.toInt(result.getRow()), i); + } + + result = clientSideRegionScanner.next(); + assertNull(result); + assertFalse(clientSideRegionScanner.hasMore); + } + } + + private static Put createPut(int rowAsInt) { + byte[] row = Bytes.toBytes(rowAsInt); + Put put = new Put(row); + put.addColumn(FAM_NAME, row, row); + return put; + } + + private static class FiltersRowsLessThan5 extends FilterBase { + + @Override + public boolean filterRowKey(Cell cell) { + byte[] rowKey = Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength() + cell.getRowOffset()); + int intValue = Bytes.toInt(rowKey); + return intValue < 5; + } + + @Override + public boolean hasFilterRow() { + return true; + } + } } From 0637bbc74b06dce80a474831c6b2ce356bf8ed35 Mon Sep 17 00:00:00 2001 From: Jing Yu Date: Mon, 26 Jun 2023 20:47:27 -0400 Subject: [PATCH 016/514] HBASE-27892 Report memstore on-heap and off-heap size as jmx metrics (#5308) Signed-off-by: Viraj Jasani --- .../hadoop/hbase/regionserver/MetricsRegionServerSource.java | 4 ++++ .../hbase/regionserver/MetricsRegionServerSourceImpl.java | 4 ++++ .../hadoop/hbase/regionserver/TestMetricsRegionServer.java | 2 ++ 3 files changed, 10 insertions(+) diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 120e4655eeda..a53899c476fa 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -238,6 +238,10 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String MAX_COMPACTED_STORE_FILE_REF_COUNT = "maxCompactedStoreFileRefCount"; String MEMSTORE_SIZE = "memStoreSize"; String MEMSTORE_SIZE_DESC = "Size of the memstore"; + String MEMSTORE_HEAP_SIZE = "memStoreHeapSize"; + String MEMSTORE_HEAP_SIZE_DESC = "On-heap Size of the memstore"; + String MEMSTORE_OFFHEAP_SIZE = "memStoreOffHeapSize"; + String MEMSTORE_OFFHEAP_SIZE_DESC = "Off-heap Size of the memstore"; String STOREFILE_SIZE = "storeFileSize"; String STOREFILE_SIZE_GROWTH_RATE = "storeFileSizeGrowthRate"; String MAX_STORE_FILE_AGE = "maxStoreFileAge"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index ee5ce34f6b60..bfac0843e331 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -520,6 +520,10 @@ private MetricsRecordBuilder addGaugesToMetricsRecordBuilder(MetricsRecordBuilde .addGauge(Interns.info(MAX_STOREFILE_COUNT, MAX_STOREFILE_COUNT_DESC), rsWrap.getMaxStoreFiles()) .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) + .addGauge(Interns.info(MEMSTORE_HEAP_SIZE, MEMSTORE_HEAP_SIZE_DESC), + rsWrap.getOnHeapMemStoreSize()) + .addGauge(Interns.info(MEMSTORE_OFFHEAP_SIZE, MEMSTORE_OFFHEAP_SIZE_DESC), + rsWrap.getOffHeapMemStoreSize()) .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC), rsWrap.getStoreFileSizeGrowthRate()) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index 49ce16c87f98..e1cc080f753d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -83,6 +83,8 @@ public void testWrapperSource() { HELPER.assertGauge("hlogFileSize", 1024000, serverSource); HELPER.assertGauge("storeFileCount", 300, serverSource); HELPER.assertGauge("memstoreSize", 1025, serverSource); + HELPER.assertGauge("memstoreHeapSize", 500, serverSource); + HELPER.assertGauge("memstoreOffHeapSize", 600, serverSource); HELPER.assertGauge("storeFileSize", 1900, serverSource); HELPER.assertGauge("storeFileSizeGrowthRate", 50.0, serverSource); HELPER.assertCounter("totalRequestCount", 899, serverSource); From 9e8e43864ce5cd47751c23574a4cc407962617bd Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 28 Jun 2023 12:07:55 -0700 Subject: [PATCH 017/514] HBASE-27951 Use ADMIN_QOS in MasterRpcServices for regionserver operational dependencies 
(#5309) It seems not correct to have the MasterRpcServices methods for direct regionserver<->master communication (regionServerStartup, regionServerReport, and reportFatalRSError) contending with normal priority requests. They should be made ADMIN_QOS priority to avoid potential operational deadlocks. Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani Reviewed-by: Aman Poonia --- .../java/org/apache/hadoop/hbase/master/MasterRpcServices.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index a2d5e8a16ecd..2e416f5e1a07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -618,6 +618,7 @@ public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController c } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public RegionServerReportResponse regionServerReport(RpcController controller, RegionServerReportRequest request) throws ServiceException { try { @@ -653,6 +654,7 @@ public RegionServerReportResponse regionServerReport(RpcController controller, } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public RegionServerStartupResponse regionServerStartup(RpcController controller, RegionServerStartupRequest request) throws ServiceException { // Register with server manager @@ -684,6 +686,7 @@ public RegionServerStartupResponse regionServerStartup(RpcController controller, } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException { String errorText = request.getErrorMessage(); From 25455b6fe3cbd8f093fd9bc8c51a1bab95353a62 Mon Sep 17 00:00:00 2001 From: Jing Yu Date: Thu, 29 Jun 2023 03:14:23 -0400 Subject: [PATCH 018/514] HBASE-27948 Report memstore on-heap and off-heap size as jmx metrics in sub=Memory bean (#5293) Signed-off-by: Viraj Jasani --- .../MetricsHeapMemoryManagerSource.java | 19 +++++++++++++++++++ .../MetricsHeapMemoryManagerSourceImpl.java | 16 ++++++++++++++++ .../hbase/regionserver/HeapMemoryManager.java | 7 +++++-- .../MetricsHeapMemoryManager.java | 16 ++++++++++++++++ .../regionserver/RegionServerAccounting.java | 4 ++-- .../TestMetricsHeapMemoryManager.java | 4 ++++ 6 files changed, 62 insertions(+), 4 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java index d9e972ba1fb6..ef130fbece4e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java @@ -70,6 +70,18 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { */ void setCurMemStoreSizeGauge(long memStoreSize); + /** + * Set the current global memstore on-heap size used gauge + * @param memStoreOnHeapSize the current memory usage in memstore on-heap, in bytes. + */ + void setCurMemStoreOnHeapSizeGauge(long memStoreOnHeapSize); + + /** + * Set the current global memstore off-heap size used gauge + * @param memStoreOffHeapSize the current memory usage in memstore off-heap, in bytes. 
+ */ + void setCurMemStoreOffHeapSizeGauge(long memStoreOffHeapSize); + /** * Update the increase/decrease memstore size histogram * @param memStoreDeltaSize the tuning result of memstore. @@ -118,6 +130,13 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { String UNBLOCKED_FLUSH_GAUGE_DESC = "Gauge for the unblocked flush count before tuning"; String MEMSTORE_SIZE_GAUGE_NAME = "memStoreSize"; String MEMSTORE_SIZE_GAUGE_DESC = "Global MemStore used in bytes by the RegionServer"; + String MEMSTORE_ONHEAP_SIZE_GAUGE_NAME = "memStoreOnHeapSize"; + String MEMSTORE_ONHEAP_SIZE_GAUGE_DESC = + "Global MemStore On-heap size in bytes by the RegionServer"; + String MEMSTORE_OFFHEAP_SIZE_GAUGE_NAME = "memStoreOffHeapSize"; + String MEMSTORE_OFFHEAP_SIZE_GAUGE_DESC = + "Global MemStore Off-heap size in bytes by the RegionServer"; + String BLOCKCACHE_SIZE_GAUGE_NAME = "blockCacheSize"; String BLOCKCACHE_SIZE_GAUGE_DESC = "BlockCache used in bytes by the RegionServer"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java index c2e8d329143c..e8967246dd75 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java @@ -41,6 +41,8 @@ public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl private final MutableGaugeLong blockedFlushGauge; private final MutableGaugeLong unblockedFlushGauge; private final MutableGaugeLong memStoreSizeGauge; + private final MutableGaugeLong memStoreOnHeapSizeGauge; + private final MutableGaugeLong memStoreOffHeapSizeGauge; private final MutableGaugeLong blockCacheSizeGauge; private final MutableFastCounter doNothingCounter; @@ -75,6 +77,10 @@ public MetricsHeapMemoryManagerSourceImpl(String metricsName, String metricsDesc getMetricsRegistry().newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); memStoreSizeGauge = getMetricsRegistry().newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); + memStoreOnHeapSizeGauge = getMetricsRegistry().newGauge(MEMSTORE_ONHEAP_SIZE_GAUGE_NAME, + MEMSTORE_ONHEAP_SIZE_GAUGE_DESC, 0L); + memStoreOffHeapSizeGauge = getMetricsRegistry().newGauge(MEMSTORE_OFFHEAP_SIZE_GAUGE_NAME, + MEMSTORE_OFFHEAP_SIZE_GAUGE_DESC, 0L); blockCacheSizeGauge = getMetricsRegistry().newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); @@ -111,6 +117,16 @@ public void setCurMemStoreSizeGauge(long memstoreSize) { memStoreSizeGauge.set(memstoreSize); } + @Override + public void setCurMemStoreOnHeapSizeGauge(long memstoreOnHeapSize) { + memStoreOnHeapSizeGauge.set(memstoreOnHeapSize); + } + + @Override + public void setCurMemStoreOffHeapSizeGauge(long memstoreOffHeapSize) { + memStoreOffHeapSizeGauge.set(memstoreOffHeapSize); + } + @Override public void updateMemStoreDeltaSizeHistogram(int memStoreDeltaSize) { if (memStoreDeltaSize >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index 7a72b9af41cf..fe2737b0a7d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -317,12 +317,15 @@ private void tune() { unblockedFlushCnt = unblockedFlushCount.getAndSet(0); tunerContext.setUnblockedFlushCount(unblockedFlushCnt); metricsHeapMemoryManager.updateUnblockedFlushCount(unblockedFlushCnt); - // TODO : add support for offheap metrics tunerContext.setCurBlockCacheUsed((float) blockCache.getCurrentSize() / maxHeapSize); metricsHeapMemoryManager.setCurBlockCacheSizeGauge(blockCache.getCurrentSize()); + long globalMemstoreDataSize = regionServerAccounting.getGlobalMemStoreDataSize(); long globalMemstoreHeapSize = regionServerAccounting.getGlobalMemStoreHeapSize(); + long globalMemStoreOffHeapSize = regionServerAccounting.getGlobalMemStoreOffHeapSize(); tunerContext.setCurMemStoreUsed((float) globalMemstoreHeapSize / maxHeapSize); - metricsHeapMemoryManager.setCurMemStoreSizeGauge(globalMemstoreHeapSize); + metricsHeapMemoryManager.setCurMemStoreSizeGauge(globalMemstoreDataSize); + metricsHeapMemoryManager.setCurMemStoreOnHeapSizeGauge(globalMemstoreHeapSize); + metricsHeapMemoryManager.setCurMemStoreOffHeapSizeGauge(globalMemStoreOffHeapSize); tunerContext.setCurBlockCacheSize(blockCachePercent); tunerContext.setCurMemStoreSize(globalMemStorePercent); TunerResult result = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManager.java index e781bddb2274..2f3e43fccc93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManager.java @@ -73,6 +73,22 @@ public void setCurMemStoreSizeGauge(final long memStoreSize) { source.setCurMemStoreSizeGauge(memStoreSize); } + /** + * Set the current global memstore on-heap size gauge + * @param memStoreOnHeapSize the current memory on-heap size in memstore, in bytes. + */ + public void setCurMemStoreOnHeapSizeGauge(final long memStoreOnHeapSize) { + source.setCurMemStoreOnHeapSizeGauge(memStoreOnHeapSize); + } + + /** + * Set the current global memstore off-heap size gauge + * @param memStoreOffHeapSize the current memory off-heap size in memstore, in bytes. + */ + public void setCurMemStoreOffHeapSizeGauge(final long memStoreOffHeapSize) { + source.setCurMemStoreOffHeapSizeGauge(memStoreOffHeapSize); + } + /** * Update the increase/decrease memstore size histogram * @param memStoreDeltaSize the tuning result of memstore. 
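// Illustrative sketch, not part of the patch above: one way to read the new memstore gauges
// from a running RegionServer. The attribute names come from the constants added in the two
// patches above; the JMX ObjectName patterns (name=RegionServer, sub=Server / sub=Memory) are
// assumptions based on the usual Hadoop metrics2 layout and should be verified against a live
// process before relying on them.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

final class MemStoreGaugeProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed bean names; adjust to match the actual JMX tree of the target RegionServer.
    ObjectName server = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Server");
    ObjectName memory = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Memory");
    System.out.println("sub=Server memStoreHeapSize    = " + mbs.getAttribute(server, "memStoreHeapSize"));
    System.out.println("sub=Server memStoreOffHeapSize = " + mbs.getAttribute(server, "memStoreOffHeapSize"));
    System.out.println("sub=Memory memStoreOnHeapSize  = " + mbs.getAttribute(memory, "memStoreOnHeapSize"));
    System.out.println("sub=Memory memStoreOffHeapSize = " + mbs.getAttribute(memory, "memStoreOffHeapSize"));
  }
}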
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java index c1706c995e7e..b26611cb4265 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java @@ -121,12 +121,12 @@ public long getGlobalMemStoreDataSize() { return globalMemStoreDataSize.sum(); } - /** Returns the global memstore heap size in the RegionServer */ + /** Returns the global memstore on-heap size in the RegionServer */ public long getGlobalMemStoreHeapSize() { return this.globalMemStoreHeapSize.sum(); } - /** Returns the global memstore heap size in the RegionServer */ + /** Returns the global memstore off-heap size in the RegionServer */ public long getGlobalMemStoreOffHeapSize() { return this.globalMemStoreOffHeapSize.sum(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java index 2d6a47fc2122..974d6485377e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java @@ -74,11 +74,15 @@ public void testGauge() { hmm.updateBlockedFlushCount(200); hmm.updateUnblockedFlushCount(50); hmm.setCurMemStoreSizeGauge(256 * 1024 * 1024); + hmm.setCurMemStoreOnHeapSizeGauge(512 * 1024 * 1024); + hmm.setCurMemStoreOffHeapSizeGauge(128 * 1024 * 1024); hmm.setCurBlockCacheSizeGauge(100 * 1024 * 1024); HELPER.assertGauge("blockedFlushGauge", 200, source); HELPER.assertGauge("unblockedFlushGauge", 50, source); HELPER.assertGauge("memStoreSize", 256 * 1024 * 1024, source); + HELPER.assertGauge("memStoreOnHeapSize", 512 * 1024 * 1024, source); + HELPER.assertGauge("memStoreOffHeapSize", 128 * 1024 * 1024, source); HELPER.assertGauge("blockCacheSize", 100 * 1024 * 1024, source); } } From bba2f98b348af3bdbe7c797a758d4cf7e09dc098 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Fri, 30 Jun 2023 15:58:33 -0400 Subject: [PATCH 019/514] HBASE-27798 Client side should back off based on wait interval in RpcThrottlingException (#5275) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang --- .../client/AsyncBatchRpcRetryingCaller.java | 51 +-- .../hbase/client/AsyncRpcRetryingCaller.java | 33 +- ...syncScanSingleRegionRpcRetryingCaller.java | 37 +- .../hadoop/hbase/client/ConnectionUtils.java | 2 +- .../HBaseServerExceptionPauseManager.java | 92 +++++ .../TestHBaseServerExceptionPauseManager.java | 139 +++++++ .../TestAsyncClientPauseForRpcThrottling.java | 380 ++++++++++++++++++ 7 files changed, 656 insertions(+), 78 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/HBaseServerExceptionPauseManager.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/backoff/TestHBaseServerExceptionPauseManager.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 49cf75892072..7a8bbeb9420b 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.CellUtil.createCellScanner; -import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; import static org.apache.hadoop.hbase.client.ConnectionUtils.calcPriority; -import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime; import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController; import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; @@ -35,6 +33,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; @@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.client.MultiResponse.RegionResult; import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; +import org.apache.hadoop.hbase.client.backoff.HBaseServerExceptionPauseManager; import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.util.Bytes; @@ -102,10 +102,6 @@ class AsyncBatchRpcRetryingCaller { private final IdentityHashMap> action2Errors; - private final long pauseNs; - - private final long pauseNsForServerOverloaded; - private final int maxAttempts; private final long operationTimeoutNs; @@ -116,6 +112,8 @@ class AsyncBatchRpcRetryingCaller { private final long startNs; + private final HBaseServerExceptionPauseManager pauseManager; + // we can not use HRegionLocation as the map key because the hashCode and equals method of // HRegionLocation only consider serverName. 
private static final class RegionRequest { @@ -155,8 +153,6 @@ public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, this.retryTimer = retryTimer; this.conn = conn; this.tableName = tableName; - this.pauseNs = pauseNs; - this.pauseNsForServerOverloaded = pauseNsForServerOverloaded; this.maxAttempts = maxAttempts; this.operationTimeoutNs = operationTimeoutNs; this.rpcTimeoutNs = rpcTimeoutNs; @@ -182,6 +178,8 @@ public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, } this.action2Errors = new IdentityHashMap<>(); this.startNs = System.nanoTime(); + this.pauseManager = + new HBaseServerExceptionPauseManager(pauseNs, pauseNsForServerOverloaded, operationTimeoutNs); } private static boolean hasIncrementOrAppend(Row action) { @@ -204,10 +202,6 @@ private static boolean hasIncrementOrAppend(RowMutations mutations) { return false; } - private long remainingTimeNs() { - return operationTimeoutNs - (System.nanoTime() - startNs); - } - private List removeErrors(Action action) { synchronized (action2Errors) { return action2Errors.remove(action); @@ -360,14 +354,14 @@ private void onComplete(Map actionsByRegion, int tries, } }); if (!failedActions.isEmpty()) { - tryResubmit(failedActions.stream(), tries, retryImmediately.booleanValue(), false); + tryResubmit(failedActions.stream(), tries, retryImmediately.booleanValue(), null); } } private void sendToServer(ServerName serverName, ServerRequest serverReq, int tries) { long remainingNs; if (operationTimeoutNs > 0) { - remainingNs = remainingTimeNs(); + remainingNs = pauseManager.remainingTimeNs(startNs); if (remainingNs <= 0) { failAll(serverReq.actionsByRegion.values().stream().flatMap(r -> r.actions.stream()), tries); @@ -465,30 +459,23 @@ private void onError(Map actionsByRegion, int tries, Thro List copiedActions = actionsByRegion.values().stream().flatMap(r -> r.actions.stream()) .collect(Collectors.toList()); addError(copiedActions, error, serverName); - tryResubmit(copiedActions.stream(), tries, error instanceof RetryImmediatelyException, - HBaseServerException.isServerOverloaded(error)); + tryResubmit(copiedActions.stream(), tries, error instanceof RetryImmediatelyException, error); } private void tryResubmit(Stream actions, int tries, boolean immediately, - boolean isServerOverloaded) { + Throwable error) { if (immediately) { groupAndSend(actions, tries); return; } - long delayNs; - long pauseNsToUse = isServerOverloaded ? 
pauseNsForServerOverloaded : pauseNs; - if (operationTimeoutNs > 0) { - long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; - if (maxDelayNs <= 0) { - failAll(actions, tries); - return; - } - delayNs = Math.min(maxDelayNs, getPauseTime(pauseNsToUse, tries - 1)); - } else { - delayNs = getPauseTime(pauseNsToUse, tries - 1); - } - if (isServerOverloaded) { + OptionalLong maybePauseNsToUse = pauseManager.getPauseNsFromException(error, tries, startNs); + if (!maybePauseNsToUse.isPresent()) { + failAll(actions, tries); + return; + } + long delayNs = maybePauseNsToUse.getAsLong(); + if (HBaseServerException.isServerOverloaded(error)) { Optional metrics = conn.getConnectionMetrics(); metrics.ifPresent(m -> m.incrementServerOverloadedBackoffTime(delayNs, TimeUnit.NANOSECONDS)); } @@ -498,7 +485,7 @@ private void tryResubmit(Stream actions, int tries, boolean immediately, private void groupAndSend(Stream actions, int tries) { long locateTimeoutNs; if (operationTimeoutNs > 0) { - locateTimeoutNs = remainingTimeNs(); + locateTimeoutNs = pauseManager.remainingTimeNs(startNs); if (locateTimeoutNs <= 0) { failAll(actions, tries); return; @@ -529,7 +516,7 @@ private void groupAndSend(Stream actions, int tries) { sendOrDelay(actionsByServer, tries); } if (!locateFailed.isEmpty()) { - tryResubmit(locateFailed.stream(), tries, false, false); + tryResubmit(locateFailed.stream(), tries, false, null); } }); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index 04e227108388..8b317bfec2c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; -import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime; import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController; import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException; import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.backoff.HBaseServerExceptionPauseManager; import org.apache.hadoop.hbase.exceptions.ScannerResetException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -56,10 +56,6 @@ public abstract class AsyncRpcRetryingCaller { private final long startNs; - private final long pauseNs; - - private final long pauseNsForServerOverloaded; - private int tries = 1; private final int maxAttempts; @@ -78,14 +74,14 @@ public abstract class AsyncRpcRetryingCaller { protected final HBaseRpcController controller; + private final HBaseServerExceptionPauseManager pauseManager; + public AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int priority, long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { this.retryTimer = retryTimer; this.conn = conn; 
this.priority = priority; - this.pauseNs = pauseNs; - this.pauseNsForServerOverloaded = pauseNsForServerOverloaded; this.maxAttempts = maxAttempts; this.operationTimeoutNs = operationTimeoutNs; this.rpcTimeoutNs = rpcTimeoutNs; @@ -95,6 +91,8 @@ public AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int pr this.controller.setPriority(priority); this.exceptions = new ArrayList<>(); this.startNs = System.nanoTime(); + this.pauseManager = + new HBaseServerExceptionPauseManager(pauseNs, pauseNsForServerOverloaded, operationTimeoutNs); } private long elapsedMs() { @@ -102,7 +100,7 @@ private long elapsedMs() { } protected final long remainingTimeNs() { - return operationTimeoutNs - (System.nanoTime() - startNs); + return pauseManager.remainingTimeNs(startNs); } protected final void completeExceptionally() { @@ -125,19 +123,12 @@ protected final void resetCallTimeout() { } private void tryScheduleRetry(Throwable error) { - long pauseNsToUse = - HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs; - long delayNs; - if (operationTimeoutNs > 0) { - long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; - if (maxDelayNs <= 0) { - completeExceptionally(); - return; - } - delayNs = Math.min(maxDelayNs, getPauseTime(pauseNsToUse, tries - 1)); - } else { - delayNs = getPauseTime(pauseNsToUse, tries - 1); + OptionalLong maybePauseNsToUse = pauseManager.getPauseNsFromException(error, tries, startNs); + if (!maybePauseNsToUse.isPresent()) { + completeExceptionally(); + return; } + long delayNs = maybePauseNsToUse.getAsLong(); tries++; if (HBaseServerException.isServerOverloaded(error)) { Optional metrics = conn.getConnectionMetrics(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index 3ef7b9b6cccc..ca39051de84d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; -import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime; import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCCallsMetrics; import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCRetriesMetrics; import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan; @@ -34,6 +32,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -43,6 +42,7 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer.ScanResumer; +import org.apache.hadoop.hbase.client.backoff.HBaseServerExceptionPauseManager; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; @@ -99,10 +99,6 @@ class AsyncScanSingleRegionRpcRetryingCaller { private final long scannerLeaseTimeoutPeriodNs; - private final long pauseNs; - - private 
final long pauseNsForServerOverloaded; - private final int maxAttempts; private final long scanTimeoutNs; @@ -131,6 +127,8 @@ class AsyncScanSingleRegionRpcRetryingCaller { private long nextCallSeq = -1L; + private final HBaseServerExceptionPauseManager pauseManager; + private enum ScanControllerState { INITIALIZED, SUSPENDED, @@ -330,8 +328,6 @@ public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionI this.loc = loc; this.regionServerRemote = isRegionServerRemote; this.scannerLeaseTimeoutPeriodNs = scannerLeaseTimeoutPeriodNs; - this.pauseNs = pauseNs; - this.pauseNsForServerOverloaded = pauseNsForServerOverloaded; this.maxAttempts = maxAttempts; this.scanTimeoutNs = scanTimeoutNs; this.rpcTimeoutNs = rpcTimeoutNs; @@ -346,16 +342,14 @@ public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionI this.controller = conn.rpcControllerFactory.newController(); this.controller.setPriority(priority); this.exceptions = new ArrayList<>(); + this.pauseManager = + new HBaseServerExceptionPauseManager(pauseNs, pauseNsForServerOverloaded, scanTimeoutNs); } private long elapsedMs() { return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - nextCallStartNs); } - private long remainingTimeNs() { - return scanTimeoutNs - (System.nanoTime() - nextCallStartNs); - } - private void closeScanner() { incRPCCallsMetrics(scanMetrics, regionServerRemote); resetController(controller, rpcTimeoutNs, HConstants.HIGH_QOS); @@ -418,19 +412,14 @@ private void onError(Throwable error) { completeExceptionally(!scannerClosed); return; } - long delayNs; - long pauseNsToUse = - HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs; - if (scanTimeoutNs > 0) { - long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS; - if (maxDelayNs <= 0) { - completeExceptionally(!scannerClosed); - return; - } - delayNs = Math.min(maxDelayNs, getPauseTime(pauseNsToUse, tries - 1)); - } else { - delayNs = getPauseTime(pauseNsToUse, tries - 1); + + OptionalLong maybePauseNsToUse = + pauseManager.getPauseNsFromException(error, tries, nextCallStartNs); + if (!maybePauseNsToUse.isPresent()) { + completeExceptionally(!scannerClosed); + return; } + long delayNs = maybePauseNsToUse.getAsLong(); if (scannerClosed) { completeWhenError(false); return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 819c8db40181..4732da6f04ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -255,7 +255,7 @@ static Result filterCells(Result result, Cell keepCellsAfter) { } // Add a delta to avoid timeout immediately after a retry sleeping. 
- static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1); + public static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1); static Get toCheckExistenceOnly(Get get) { if (get.isCheckExistenceOnly()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/HBaseServerExceptionPauseManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/HBaseServerExceptionPauseManager.java new file mode 100644 index 000000000000..67f46822fe39 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/HBaseServerExceptionPauseManager.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.backoff; + +import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; +import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime; + +import java.util.OptionalLong; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseServerException; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class HBaseServerExceptionPauseManager { + private static final Logger LOG = LoggerFactory.getLogger(HBaseServerExceptionPauseManager.class); + + private final long pauseNs; + private final long pauseNsForServerOverloaded; + private final long timeoutNs; + + public HBaseServerExceptionPauseManager(long pauseNs, long pauseNsForServerOverloaded, + long timeoutNs) { + this.pauseNs = pauseNs; + this.pauseNsForServerOverloaded = pauseNsForServerOverloaded; + this.timeoutNs = timeoutNs; + } + + /** + * Returns the nanos, if any, for which the client should wait + * @param error The exception from the server + * @param tries The current retry count + * @return The time, in nanos, to pause. If empty then pausing would exceed our timeout, so we + * should throw now + */ + public OptionalLong getPauseNsFromException(Throwable error, int tries, long startNs) { + long expectedSleepNs; + long remainingTimeNs = remainingTimeNs(startNs) - SLEEP_DELTA_NS; + if (error instanceof RpcThrottlingException) { + RpcThrottlingException rpcThrottlingException = (RpcThrottlingException) error; + expectedSleepNs = TimeUnit.MILLISECONDS.toNanos(rpcThrottlingException.getWaitInterval()); + if (expectedSleepNs > remainingTimeNs && remainingTimeNs > 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("RpcThrottlingException suggested pause of {}ns which would exceed " + + "the timeout. 
We should throw instead.", expectedSleepNs, rpcThrottlingException); + } + return OptionalLong.empty(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Sleeping for {}ns after catching RpcThrottlingException", expectedSleepNs, + rpcThrottlingException); + } + } else { + expectedSleepNs = + HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs; + // RpcThrottlingException tells us exactly how long the client should wait for, + // so we should not factor in the retry count for said exception + expectedSleepNs = getPauseTime(expectedSleepNs, tries - 1); + } + + if (timeoutNs > 0) { + if (remainingTimeNs <= 0) { + return OptionalLong.empty(); + } + expectedSleepNs = Math.min(remainingTimeNs, expectedSleepNs); + } + + return OptionalLong.of(expectedSleepNs); + } + + public long remainingTimeNs(long startNs) { + return timeoutNs - (System.nanoTime() - startNs); + } + +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/backoff/TestHBaseServerExceptionPauseManager.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/backoff/TestHBaseServerExceptionPauseManager.java new file mode 100644 index 000000000000..ee4ee47f1850 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/backoff/TestHBaseServerExceptionPauseManager.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
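// Illustrative sketch, not part of the patch above: how a retrying caller is expected to
// consume HBaseServerExceptionPauseManager. An empty OptionalLong means the suggested pause
// (for RpcThrottlingException, the server-provided wait interval) would exceed the remaining
// operation timeout, so the caller should fail now instead of sleeping. The class name and the
// retry-scheduling stub below are assumptions standing in for the real callers' retry timer.
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.backoff.HBaseServerExceptionPauseManager;

final class PauseManagerUsageSketch {
  static void onCallFailure(HBaseServerExceptionPauseManager pauseManager, Throwable error,
    int tries, long startNs) {
    OptionalLong pauseNs = pauseManager.getPauseNsFromException(error, tries, startNs);
    if (!pauseNs.isPresent()) {
      // No budget left for another pause: surface the failure immediately.
      throw new RuntimeException("operation timeout would be exceeded", error);
    }
    scheduleRetryAfterNanos(pauseNs.getAsLong()); // hypothetical retry-timer hook
  }

  private static void scheduleRetryAfterNanos(long delayNs) {
    System.out.println("retrying in " + TimeUnit.NANOSECONDS.toMillis(delayNs) + " ms");
  }
}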
+ */ +package org.apache.hadoop.hbase.client.backoff; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.OptionalLong; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseServerException; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, SmallTests.class }) +public class TestHBaseServerExceptionPauseManager { + + private static final long WAIT_INTERVAL_MILLIS = 1L; + private static final long WAIT_INTERVAL_NANOS = + TimeUnit.MILLISECONDS.toNanos(WAIT_INTERVAL_MILLIS); + private static final long PAUSE_NANOS_FOR_SERVER_OVERLOADED = WAIT_INTERVAL_NANOS * 3; + + private static final long PAUSE_NANOS = WAIT_INTERVAL_NANOS * 2; + + private final RpcThrottlingException RPC_THROTTLING_EXCEPTION = new RpcThrottlingException( + RpcThrottlingException.Type.NumRequestsExceeded, WAIT_INTERVAL_MILLIS, "doot"); + private final Throwable OTHER_EXCEPTION = new RuntimeException(""); + private final HBaseServerException SERVER_OVERLOADED_EXCEPTION = new HBaseServerException(true); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestHBaseServerExceptionPauseManager.class); + + @Test + public void itSupportsRpcThrottlingNanosNoTimeout() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 0); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(RPC_THROTTLING_EXCEPTION, 1, System.nanoTime()); + + assertTrue(pauseNanos.isPresent()); + assertEquals(pauseNanos.getAsLong(), WAIT_INTERVAL_NANOS); + } + + @Test + public void itSupportsRpcThrottlingNanosLenientTimeout() { + HBaseServerExceptionPauseManager pauseManager = new HBaseServerExceptionPauseManager( + PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, System.nanoTime() * 2); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(RPC_THROTTLING_EXCEPTION, 1, System.nanoTime()); + + assertTrue(pauseNanos.isPresent()); + assertEquals(pauseNanos.getAsLong(), WAIT_INTERVAL_NANOS); + } + + @Test + public void itSupportsServerOverloadedExceptionNanos() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 0); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(SERVER_OVERLOADED_EXCEPTION, 1, System.nanoTime()); + + assertTrue(pauseNanos.isPresent()); + // account for 1% jitter in pause time + assertTrue(pauseNanos.getAsLong() >= PAUSE_NANOS_FOR_SERVER_OVERLOADED * 0.99); + assertTrue(pauseNanos.getAsLong() <= PAUSE_NANOS_FOR_SERVER_OVERLOADED * 1.01); + } + + @Test + public void itSupportsOtherExceptionNanos() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 0); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(OTHER_EXCEPTION, 1, System.nanoTime()); + + assertTrue(pauseNanos.isPresent()); + // account for 1% jitter in pause time + assertTrue(pauseNanos.getAsLong() >= PAUSE_NANOS * 0.99); + assertTrue(pauseNanos.getAsLong() <= PAUSE_NANOS * 
1.01); + } + + @Test + public void itTimesOutRpcThrottlingException() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 1); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(RPC_THROTTLING_EXCEPTION, 1, System.nanoTime()); + + assertFalse(pauseNanos.isPresent()); + } + + @Test + public void itTimesOutRpcOtherException() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 1); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(OTHER_EXCEPTION, 1, System.nanoTime()); + + assertFalse(pauseNanos.isPresent()); + } + + @Test + public void itDoesNotTimeOutIfDisabled() { + HBaseServerExceptionPauseManager pauseManager = + new HBaseServerExceptionPauseManager(PAUSE_NANOS, PAUSE_NANOS_FOR_SERVER_OVERLOADED, 0); + + OptionalLong pauseNanos = + pauseManager.getPauseNsFromException(OTHER_EXCEPTION, 1, System.nanoTime()); + + assertTrue(pauseNanos.isPresent()); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java new file mode 100644 index 000000000000..3455b3975664 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java @@ -0,0 +1,380 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.RegionTooBusyException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RSRpcServices; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestAsyncClientPauseForRpcThrottling { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAsyncClientPauseForRpcThrottling.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableName TABLE_NAME = TableName.valueOf("RpcThrottling"); + + private static byte[] FAMILY = Bytes.toBytes("Family"); + + private static byte[] QUALIFIER = Bytes.toBytes("Qualifier"); + + private static AsyncConnection CONN; + private static final AtomicBoolean THROTTLE = new AtomicBoolean(false); + private static final AtomicInteger FORCE_RETRIES = new AtomicInteger(0); + private static final long WAIT_INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(1); + private static final int RETRY_COUNT = 3; + private static final int MAX_MULTIPLIER_EXPECTATION = 2; + + public static final class ThrottlingRSRpcServicesForTest extends RSRpcServices { + + public ThrottlingRSRpcServicesForTest(HRegionServer rs) throws IOException { + super(rs); + } + + @Override + public ClientProtos.GetResponse get(RpcController controller, ClientProtos.GetRequest request) + throws ServiceException { + maybeForceRetry(); + maybeThrottle(); + return super.get(controller, request); + } + + @Override + public ClientProtos.MultiResponse multi(RpcController rpcc, ClientProtos.MultiRequest request) + throws ServiceException { + maybeForceRetry(); + maybeThrottle(); + return super.multi(rpcc, request); + } + + @Override + public ClientProtos.ScanResponse scan(RpcController controller, + ClientProtos.ScanRequest request) throws ServiceException { + maybeForceRetry(); + maybeThrottle(); + return super.scan(controller, request); + } + + private void maybeForceRetry() throws ServiceException { + if (FORCE_RETRIES.get() > 0) { + FORCE_RETRIES.addAndGet(-1); + throw new 
ServiceException(new RegionTooBusyException("Retry")); + } + } + + private void maybeThrottle() throws ServiceException { + if (THROTTLE.get()) { + THROTTLE.set(false); + throw new ServiceException(new RpcThrottlingException("number of requests exceeded - wait " + + TimeUnit.NANOSECONDS.toMillis(WAIT_INTERVAL_NANOS) + "ms")); + } + } + } + + public static final class ThrottlingRegionServerForTest extends HRegionServer { + + public ThrottlingRegionServerForTest(Configuration conf) throws IOException { + super(conf); + } + + @Override + protected RSRpcServices createRpcServices() throws IOException { + return new ThrottlingRSRpcServicesForTest(this); + } + } + + @BeforeClass + public static void setUp() throws Exception { + assertTrue( + "The MAX_MULTIPLIER_EXPECTATION must be less than HConstants.RETRY_BACKOFF[RETRY_COUNT] " + + "in order for our tests to adequately verify that we aren't " + + "multiplying throttled pauses based on the retry count.", + MAX_MULTIPLIER_EXPECTATION < HConstants.RETRY_BACKOFF[RETRY_COUNT]); + + UTIL.getConfiguration().setLong(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + UTIL.startMiniCluster(1); + UTIL.getMiniHBaseCluster().getConfiguration().setClass(HConstants.REGION_SERVER_IMPL, + ThrottlingRegionServerForTest.class, HRegionServer.class); + HRegionServer regionServer = UTIL.getMiniHBaseCluster().startRegionServer().getRegionServer(); + + try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { + UTIL.waitTableAvailable(TABLE_NAME); + for (int i = 0; i < 100; i++) { + table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i))); + } + } + + UTIL.getAdmin().move(UTIL.getAdmin().getRegions(TABLE_NAME).get(0).getEncodedNameAsBytes(), + regionServer.getServerName()); + Configuration conf = new Configuration(UTIL.getConfiguration()); + CONN = ConnectionFactory.createAsyncConnection(conf).get(); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.getAdmin().disableTable(TABLE_NAME); + UTIL.getAdmin().deleteTable(TABLE_NAME); + Closeables.close(CONN, true); + UTIL.shutdownMiniCluster(); + } + + private void assertTime(Callable callable, long time, boolean isGreater) throws Exception { + long costNs = getCostNs(callable); + if (isGreater) { + assertTrue(costNs > time); + } else { + assertTrue(costNs <= time); + } + } + + private void assertTimeBetween(Callable callable, long minNs, long maxNs) throws Exception { + long costNs = getCostNs(callable); + assertTrue(costNs > minNs); + assertTrue(costNs < maxNs); + } + + private long getCostNs(Callable callable) throws Exception { + long startNs = System.nanoTime(); + callable.call(); + return System.nanoTime() - startNs; + } + + @Test + public void itWaitsForThrottledGet() throws Exception { + boolean isThrottled = true; + THROTTLE.set(isThrottled); + AsyncTable table = CONN.getTable(TABLE_NAME); + assertTime(() -> { + table.get(new Get(Bytes.toBytes(0))).get(); + return null; + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForUnthrottledGet() throws Exception { + boolean isThrottled = false; + THROTTLE.set(isThrottled); + AsyncTable table = CONN.getTable(TABLE_NAME); + assertTime(() -> { + table.get(new Get(Bytes.toBytes(0))).get(); + return null; + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForThrottledGetExceedingTimeout() throws Exception { + AsyncTable table = + CONN.getTableBuilder(TABLE_NAME).setOperationTimeout(1, TimeUnit.MILLISECONDS).build(); + boolean isThrottled = true; + 
THROTTLE.set(isThrottled); + assertTime(() -> { + assertThrows(ExecutionException.class, () -> table.get(new Get(Bytes.toBytes(0))).get()); + return null; + }, WAIT_INTERVAL_NANOS, false); + } + + @Test + public void itDoesNotMultiplyThrottledGetWait() throws Exception { + THROTTLE.set(true); + FORCE_RETRIES.set(RETRY_COUNT); + + AsyncTable table = + CONN.getTableBuilder(TABLE_NAME).setOperationTimeout(1, TimeUnit.MINUTES) + .setMaxRetries(RETRY_COUNT + 1).setRetryPause(1, TimeUnit.NANOSECONDS).build(); + + assertTimeBetween(() -> { + table.get(new Get(Bytes.toBytes(0))).get(); + return null; + }, WAIT_INTERVAL_NANOS, MAX_MULTIPLIER_EXPECTATION * WAIT_INTERVAL_NANOS); + } + + @Test + public void itWaitsForThrottledBatch() throws Exception { + boolean isThrottled = true; + THROTTLE.set(isThrottled); + assertTime(() -> { + List> futures = new ArrayList<>(); + try (AsyncBufferedMutator mutator = CONN.getBufferedMutator(TABLE_NAME)) { + for (int i = 100; i < 110; i++) { + futures.add(mutator + .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); + } + } + return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForUnthrottledBatch() throws Exception { + boolean isThrottled = false; + THROTTLE.set(isThrottled); + assertTime(() -> { + List> futures = new ArrayList<>(); + try (AsyncBufferedMutator mutator = CONN.getBufferedMutator(TABLE_NAME)) { + for (int i = 100; i < 110; i++) { + futures.add(mutator + .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); + } + } + return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForThrottledBatchExceedingTimeout() throws Exception { + boolean isThrottled = true; + THROTTLE.set(isThrottled); + assertTime(() -> { + List> futures = new ArrayList<>(); + try (AsyncBufferedMutator mutator = CONN.getBufferedMutatorBuilder(TABLE_NAME) + .setOperationTimeout(1, TimeUnit.MILLISECONDS).build()) { + for (int i = 100; i < 110; i++) { + futures.add(mutator + .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); + } + } + assertThrows(ExecutionException.class, + () -> CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get()); + return null; + }, WAIT_INTERVAL_NANOS, false); + } + + @Test + public void itDoesNotMultiplyThrottledBatchWait() throws Exception { + THROTTLE.set(true); + FORCE_RETRIES.set(RETRY_COUNT); + + assertTimeBetween(() -> { + List> futures = new ArrayList<>(); + try (AsyncBufferedMutator mutator = + CONN.getBufferedMutatorBuilder(TABLE_NAME).setOperationTimeout(1, TimeUnit.MINUTES) + .setMaxRetries(RETRY_COUNT + 1).setRetryPause(1, TimeUnit.NANOSECONDS).build()) { + for (int i = 100; i < 110; i++) { + futures.add(mutator + .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); + } + } + CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); + return null; + }, WAIT_INTERVAL_NANOS, MAX_MULTIPLIER_EXPECTATION * WAIT_INTERVAL_NANOS); + } + + @Test + public void itWaitsForThrottledScan() throws Exception { + boolean isThrottled = true; + THROTTLE.set(isThrottled); + assertTime(() -> { + try ( + ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(80))) { + for (int i = 0; i < 100; i++) { + Result result = scanner.next(); + assertArrayEquals(Bytes.toBytes(i), 
result.getValue(FAMILY, QUALIFIER)); + } + } + return null; + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForUnthrottledScan() throws Exception { + boolean isThrottled = false; + THROTTLE.set(isThrottled); + assertTime(() -> { + try ( + ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(80))) { + for (int i = 0; i < 100; i++) { + Result result = scanner.next(); + assertArrayEquals(Bytes.toBytes(i), result.getValue(FAMILY, QUALIFIER)); + } + } + return null; + }, WAIT_INTERVAL_NANOS, isThrottled); + } + + @Test + public void itDoesNotWaitForThrottledScanExceedingTimeout() throws Exception { + AsyncTable table = + CONN.getTableBuilder(TABLE_NAME).setScanTimeout(1, TimeUnit.MILLISECONDS).build(); + boolean isThrottled = true; + THROTTLE.set(isThrottled); + assertTime(() -> { + try (ResultScanner scanner = table.getScanner(new Scan().setCaching(80))) { + for (int i = 0; i < 100; i++) { + assertThrows(RetriesExhaustedException.class, scanner::next); + } + } + return null; + }, WAIT_INTERVAL_NANOS, false); + } + + @Test + public void itDoesNotMultiplyThrottledScanWait() throws Exception { + THROTTLE.set(true); + FORCE_RETRIES.set(RETRY_COUNT); + + AsyncTable table = + CONN.getTableBuilder(TABLE_NAME).setOperationTimeout(1, TimeUnit.MINUTES) + .setMaxRetries(RETRY_COUNT + 1).setRetryPause(1, TimeUnit.NANOSECONDS).build(); + + assertTimeBetween(() -> { + try (ResultScanner scanner = table.getScanner(new Scan().setCaching(80))) { + for (int i = 0; i < 100; i++) { + Result result = scanner.next(); + assertArrayEquals(Bytes.toBytes(i), result.getValue(FAMILY, QUALIFIER)); + } + } + return null; + }, WAIT_INTERVAL_NANOS, MAX_MULTIPLIER_EXPECTATION * WAIT_INTERVAL_NANOS); + } +} From 05a47fe8ea3e0bdd789a2bc8a333bf609bdcf37a Mon Sep 17 00:00:00 2001 From: chenglei Date: Mon, 3 Jul 2023 21:09:35 +0800 Subject: [PATCH 020/514] HBASE-27954 Eliminate duplicate code for getNonRootIndexedKey in HFileBlockIndex (#5312) Signed-off-by: Duo Zhang --- .../hbase/io/hfile/HFileBlockIndex.java | 12 ++--- .../hbase/io/hfile/NoOpIndexBlockEncoder.java | 47 ++----------------- 2 files changed, 8 insertions(+), 51 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 12ef197af439..592c19c866cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -399,14 +399,8 @@ public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null); try { - ByteBuff b = midLeafBlock.getBufferWithoutHeader(); - int numDataBlocks = b.getIntAfterPosition(0); - int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1)); - int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset - - SECONDARY_INDEX_ENTRY_OVERHEAD; - int keyOffset = - Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + SECONDARY_INDEX_ENTRY_OVERHEAD; - byte[] bytes = b.toBytes(keyOffset, keyLen); + byte[] bytes = getNonRootIndexedKey(midLeafBlock.getBufferWithoutHeader(), midKeyEntry); + assert bytes != null; targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); } finally { midLeafBlock.release(); @@ -699,7 +693,7 @@ 
public int rootBlockContainingKey(final byte[] key, int offset, int length) { * @param i the ith position * @return The indexed key at the ith position in the nonRootIndex. */ - protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { + static byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { int numEntries = nonRootIndex.getInt(0); if (i < 0 || i >= numEntries) { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java index 3115a5153c21..4162fca6afe5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.MID_KEY_METADATA_SIZE; -import static org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD; import java.io.DataInput; import java.io.DataInputStream; @@ -269,14 +268,9 @@ public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOExcepti HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null); try { - ByteBuff b = midLeafBlock.getBufferWithoutHeader(); - int numDataBlocks = b.getIntAfterPosition(0); - int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1)); - int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset - - SECONDARY_INDEX_ENTRY_OVERHEAD; - int keyOffset = - Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + SECONDARY_INDEX_ENTRY_OVERHEAD; - byte[] bytes = b.toBytes(keyOffset, keyLen); + byte[] bytes = HFileBlockIndex.BlockIndexReader + .getNonRootIndexedKey(midLeafBlock.getBufferWithoutHeader(), midKeyEntry); + assert bytes != null; targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); } finally { midLeafBlock.release(); @@ -379,7 +373,8 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB currentOnDiskSize = buffer.getInt(); // Only update next indexed key if there is a next indexed key in the current level - byte[] nonRootIndexedKey = getNonRootIndexedKey(buffer, index + 1); + byte[] nonRootIndexedKey = + HFileBlockIndex.BlockIndexReader.getNonRootIndexedKey(buffer, index + 1); if (nonRootIndexedKey != null) { tmpNextIndexKV.setKey(nonRootIndexedKey, 0, nonRootIndexedKey.length); nextIndexedKey = tmpNextIndexKV; @@ -441,37 +436,5 @@ public String toString() { } return sb.toString(); } - - /** - * The indexed key at the ith position in the nonRootIndex. The position starts at 0. - * @param i the ith position - * @return The indexed key at the ith position in the nonRootIndex. - */ - protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { - int numEntries = nonRootIndex.getInt(0); - if (i < 0 || i >= numEntries) { - return null; - } - - // Entries start after the number of entries and the secondary index. - // The secondary index takes numEntries + 1 ints. 
- int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2); - // Targetkey's offset relative to the end of secondary index - int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1)); - - // The offset of the target key in the blockIndex buffer - int targetKeyOffset = entriesOffset // Skip secondary index - + targetKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size - - // We subtract the two consecutive secondary index elements, which - // gives us the size of the whole (offset, onDiskSize, key) tuple. We - // then need to subtract the overhead of offset and onDiskSize. - int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset - - SECONDARY_INDEX_ENTRY_OVERHEAD; - - // TODO check whether we can make BB backed Cell here? So can avoid bytes copy. - return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength); - } } } From d8447d9fef57076c479f7ced6564a0cfb582cf3e Mon Sep 17 00:00:00 2001 From: guluo Date: Wed, 5 Jul 2023 22:07:49 +0800 Subject: [PATCH 021/514] HBASE-27942 Update the description about hbase.hstore.compactionThreshold (#5302) Signed-off-by: Duo Zhang --- hbase-common/src/main/resources/hbase-default.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index a4ee0c8b20b6..d2ecef6bda3d 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -812,7 +812,7 @@ possible configurations would overwhelm and obscure the important. hbase.hstore.compactionThreshold 3 - If more than this number of StoreFiles exist in any one Store + If more than or equal to this number of StoreFiles exist in any one Store (one StoreFile is written per flush of MemStore), a compaction is run to rewrite all StoreFiles into a single StoreFile. Larger values delay compaction, but when compaction does occur, it takes longer to complete. From 2ca012f6f868bcda7d0faa7d343a4627c919fb86 Mon Sep 17 00:00:00 2001 From: Yash Dodeja Date: Wed, 5 Jul 2023 19:43:25 +0530 Subject: [PATCH 022/514] HBASE-27663 ChaosMonkey documentation enhancements (#5172) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/developer.adoc | 60 +++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 20f96921b1d3..ea327fb3e253 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1759,7 +1759,7 @@ following example runs ChaosMonkey with the default configuration: [source,bash] ---- -$ bin/hbase org.apache.hadoop.hbase.util.ChaosMonkey +$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner 12/11/19 23:21:57 INFO util.ChaosMonkey: Using ChaosMonkey Policy: class org.apache.hadoop.hbase.util.ChaosMonkey$PeriodicRandomActionPolicy, period:60000 12/11/19 23:21:57 INFO util.ChaosMonkey: Sleeping for 26953 to add jitter @@ -1801,6 +1801,64 @@ $ bin/hbase org.apache.hadoop.hbase.util.ChaosMonkey The output indicates that ChaosMonkey started the default `PeriodicRandomActionPolicy` policy, which is configured with all the available actions. It chose to run `RestartActiveMaster` and `RestartRandomRs` actions. +==== ChaosMonkey without SSH + +Chaos monkey can be run without SSH using the Chaos service and ZNode cluster manager. 
HBase ships
+with many cluster managers, available in the `hbase-it/src/test/java/org/apache/hadoop/hbase/` directory.
+
+Set the following property in hbase configuration to switch to `ZNodeClusterManager`:
+`hbase.it.clustermanager.class=org.apache.hadoop.hbase.ZNodeClusterManager`
+
+Start chaos agent on all hosts where you want to test chaos scenarios.
+
+[source,bash]
+----
+$ bin/hbase org.apache.hadoop.hbase.chaos.ChaosService -c start
+----
+
+Start chaos monkey runner from any one host, preferably an edgenode.
+An example log while running chaos monkey with the default policy PeriodicRandomActionPolicy is shown below.
+Command Options:
+ -c Name of extra configurations file to find on CLASSPATH
+ -m,--monkey Which chaos monkey to run
+ -monkeyProps The properties file for specifying chaos monkey properties.
+ -tableName Table name in the test to run chaos monkey against
+ -familyName Family name in the test to run chaos monkey against
+
+[source,bash]
+----
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner
+
+INFO [main] hbase.HBaseCommonTestingUtility: Instantiating org.apache.hadoop.hbase.ZNodeClusterManager
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ZooKeeper: Initiating client connection, connectString=host1.example.com:2181,host2.example.com:2181,host3.example.com:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$19/2106254492@1a39cf8
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ClientCnxnSocket: jute.maxbuffer value is 4194304 Bytes
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ClientCnxn: zookeeper.request.timeout value is 0. feature enabled=
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Opening socket connection to server host2.example.com/10.20.30.40:2181. Will not attempt to authenticate using SASL (unknown error)
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Socket connection established, initiating session, client: /10.20.30.40:35164, server: host2.example.com/10.20.30.40:2181
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Session establishment complete on server host2.example.com/10.20.30.40:2181, sessionid = 0x101de9204670877, negotiated timeout = 60000
+INFO [main] policies.Policy: Using ChaosMonkey Policy class org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy, period=60000 ms
+ [ChaosMonkey-2] policies.Policy: Sleeping for 93741 ms to add jitter
+INFO [ChaosMonkey-0] policies.Policy: Sleeping for 9752 ms to add jitter
+INFO [ChaosMonkey-1] policies.Policy: Sleeping for 65562 ms to add jitter
+INFO [ChaosMonkey-3] policies.Policy: Sleeping for 38777 ms to add jitter
+INFO [ChaosMonkey-0] actions.CompactRandomRegionOfTableAction: Performing action: Compact random region of table usertable, major=false
+INFO [ChaosMonkey-0] policies.Policy: Sleeping for 59532 ms
+INFO [ChaosMonkey-3] client.ConnectionImplementation: Getting master connection state from TTL Cache
+INFO [ChaosMonkey-3] client.ConnectionImplementation: Getting master state using rpc call
+INFO [ChaosMonkey-3] actions.DumpClusterStatusAction: Cluster status
+Master: host1.example.com,16000,1678339058222
+Number of backup masters: 0
+Number of live region servers: 3
+ host1.example.com,16020,1678794551244
+ host2.example.com,16020,1678341258970
+ host3.example.com,16020,1678347834336
+Number of dead region servers: 0
+Number of unknown region servers: 0
+Average load: 123.6666666666666
+Number of requests: 118645157
+Number of regions: 2654
+Number of regions in transition: 0
+INFO [ChaosMonkey-3] policies.Policy: Sleeping for 89614 ms
+----
+
 ==== Available Policies
 HBase ships with several ChaosMonkey policies, available in the
 `hbase/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/` directory.
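+
+To run a specific chaos monkey instead of the default, the `-m,--monkey` and `-monkeyProps` options
+listed above can be passed to the runner. The invocation below is only an illustrative sketch:
+`serverKilling` and `monkey.properties` are example values, not taken from this patch.
+
+[source,bash]
+----
+# placeholder monkey name and properties file -- substitute your own
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner -m serverKilling -monkeyProps monkey.properties
+----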
From d1f29d06ece649c32caa2ae5551e9a67a540014a Mon Sep 17 00:00:00 2001 From: guluo Date: Thu, 6 Jul 2023 10:19:32 +0800 Subject: [PATCH 023/514] HBASE-27920 Skipping compact for this region if the table disable compaction (#5273) Signed-off-by: Duo Zhang Signed-off-by: Wellington Chevreuil --- .../org/apache/hadoop/hbase/regionserver/HRegionServer.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 1c5940ca9aed..f9f841181064 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1638,8 +1638,9 @@ private static class CompactionChecker extends ScheduledChore { @Override protected void chore() { for (Region r : this.instance.onlineRegions.values()) { - // Skip compaction if region is read only - if (r == null || r.isReadOnly()) { + // If region is read only or compaction is disabled at table level, there's no need to + // iterate through region's stores + if (r == null || r.isReadOnly() || !r.getTableDescriptor().isCompactionEnabled()) { continue; } From 047f077f0b6117985eab9d02bb10bb8c328fabfb Mon Sep 17 00:00:00 2001 From: Fantasy-Jay <13631435453@163.com> Date: Thu, 6 Jul 2023 10:32:08 +0800 Subject: [PATCH 024/514] HBASE-27845 Distinguish the mutate types of rpc error in MetricsConnection. (#5224) Co-authored-by: fantasy <875282031@qq.com> Co-authored-by: jay.zhu Signed-off-by: Duo Zhang --- .../hbase/client/MetricsConnection.java | 25 +++++++++++- .../hbase/client/TestMetricsConnection.java | 39 ++++++++++++++----- 2 files changed, 52 insertions(+), 12 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index dd3e571a8b7a..8a299dc4e5c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -652,7 +652,28 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T concurrentCallsPerServerHist.update(callsPerServer); } // Update the counter that tracks RPCs by type. - final String methodName = method.getService().getName() + "_" + method.getName(); + StringBuilder methodName = new StringBuilder(); + methodName.append(method.getService().getName()).append("_").append(method.getName()); + // Distinguish mutate types. + if ("Mutate".equals(method.getName())) { + final MutationType type = ((MutateRequest) param).getMutation().getMutateType(); + switch (type) { + case APPEND: + methodName.append("(Append)"); + break; + case DELETE: + methodName.append("(Delete)"); + break; + case INCREMENT: + methodName.append("(Increment)"); + break; + case PUT: + methodName.append("(Put)"); + break; + default: + methodName.append("(Unknown)"); + } + } getMetric(CNT_BASE + methodName, rpcCounters, counterFactory).inc(); if (e != null) { getMetric(FAILURE_CNT_BASE + methodName, rpcCounters, counterFactory).inc(); @@ -729,7 +750,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T } } // Fallback to dynamic registry lookup for DDL methods. 
- updateRpcGeneric(methodName, stats); + updateRpcGeneric(methodName.toString(), stats); } public void incrCacheDroppingExceptions(Object exception) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index d70d2cf60006..2afdc7ee558d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -99,7 +99,7 @@ public void testMetricsConnectionScope() throws IOException { } @Test - public void testMetricsWithMutiConnections() throws IOException { + public void testMetricsWithMultiConnections() throws IOException { Configuration conf = new Configuration(); conf.setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true); conf.set(MetricsConnection.METRICS_SCOPE_KEY, "unit-test"); @@ -178,7 +178,8 @@ public void testStaticMetrics() throws IOException { MutateRequest.newBuilder() .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) .build(), - MetricsConnection.newCallStats(), null); + MetricsConnection.newCallStats(), + new CallTimeoutException("test with CallTimeoutException")); } final String rpcCountPrefix = "rpcCount_" + ClientService.getDescriptor().getName() + "_"; @@ -188,20 +189,38 @@ public void testStaticMetrics() throws IOException { long metricVal; Counter counter; - for (String method : new String[] { "Get", "Scan", "Multi", "Mutate" }) { + for (String method : new String[] { "Get", "Scan", "Multi" }) { metricKey = rpcCountPrefix + method; metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); metricKey = rpcFailureCountPrefix + method; counter = METRICS.getRpcCounters().get(metricKey); metricVal = (counter != null) ? counter.getCount() : 0; - if (method.equals("Get") || method.equals("Mutate")) { + if (method.equals("Get")) { // no failure - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal == 0); + assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); } else { // has failure - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal == loop); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); + } + } + + String method = "Mutate"; + for (String mutationType : new String[] { "Append", "Delete", "Increment", "Put" }) { + metricKey = rpcCountPrefix + method + "(" + mutationType + ")"; + metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); + + metricKey = rpcFailureCountPrefix + method + "(" + mutationType + ")"; + counter = METRICS.getRpcCounters().get(metricKey); + metricVal = (counter != null) ? counter.getCount() : 0; + if (mutationType.equals("Put")) { + // has failure + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); + } else { + // no failure + assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); } } @@ -209,19 +228,19 @@ public void testStaticMetrics() throws IOException { metricKey = "rpcRemoteExceptions_IOException"; counter = METRICS.getRpcCounters().get(metricKey); metricVal = (counter != null) ? 
counter.getCount() : 0; - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal == loop); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); // local exception metricKey = "rpcLocalExceptions_CallTimeoutException"; counter = METRICS.getRpcCounters().get(metricKey); metricVal = (counter != null) ? counter.getCount() : 0; - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal == loop); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop * 2); // total exception metricKey = "rpcTotalExceptions"; counter = METRICS.getRpcCounters().get(metricKey); metricVal = (counter != null) ? counter.getCount() : 0; - assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal == loop * 2); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop * 3); for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getGetTracker(), METRICS.getScanTracker(), METRICS.getMultiTracker(), From b2e2abe64bd9f3d511b8193510fe66c76ff7854c Mon Sep 17 00:00:00 2001 From: guluo Date: Thu, 6 Jul 2023 10:32:38 +0800 Subject: [PATCH 025/514] HBASE-27859 HMaster.getCompactionState can happen NPE when region state is closed (#5232) Signed-off-by: Duo Zhang Signed-off-by: Wellington Chevreuil --- .../main/java/org/apache/hadoop/hbase/master/HMaster.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3d59db245015..8cb40cb58803 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -4208,6 +4208,11 @@ public CompactionState getCompactionState(final TableName tableName) { continue; } RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName()); + if (regionMetrics == null) { + LOG.warn("Can not get compaction details for the region: {} , it may be not online.", + regionInfo.getRegionNameAsString()); + continue; + } if (regionMetrics.getCompactionState() == CompactionState.MAJOR) { if (compactionState == CompactionState.MINOR) { compactionState = CompactionState.MAJOR_AND_MINOR; From 1d704a79ab8942b5092640d7274826fd4d31c01d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 11 Jul 2023 21:40:50 +0800 Subject: [PATCH 026/514] HBASE-27969 TableReplicationQueueStorage.hasData does not work as expected (#5321) Signed-off-by: Liangjun He --- .../hadoop/hbase/replication/TableReplicationQueueStorage.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java index e59edd52f793..8fff3d461f74 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java @@ -533,7 +533,7 @@ public Set getAllHFileRefs() throws ReplicationException { @Override public boolean hasData() throws ReplicationException { try { - return conn.getAdmin().getDescriptor(tableName) != null; + return conn.getAdmin().tableExists(tableName); } catch (IOException e) { throw new ReplicationException("failed to get replication queue table", e); } From 
3353381338c4a19825f41fefd103b5ad12be8b2a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 13 Jul 2023 21:24:43 +0800 Subject: [PATCH 027/514] HBASE-27231 FSHLog should retry writing WAL entries when syncs to HDFS failed (#5317) Co-authored-by: Duo Zhang Co-authored-by: chenglei Signed-off-by: chenglei Signed-off-by: Duo Zhang --- .../hbase/regionserver/wal/AbstractFSWAL.java | 752 +++++++++++++- .../hbase/regionserver/wal/AsyncFSWAL.java | 651 +----------- .../hadoop/hbase/regionserver/wal/FSHLog.java | 977 ++++-------------- .../regionserver/TestFailedAppendAndSync.java | 18 +- .../hbase/regionserver/TestHRegion.java | 12 + .../hbase/regionserver/TestWALLockup.java | 454 -------- .../regionserver/wal/AbstractTestFSWAL.java | 2 +- .../hbase/regionserver/wal/TestFSHLog.java | 102 +- .../regionserver/wal/TestLogRollAbort.java | 53 +- .../regionserver/wal/TestLogRolling.java | 213 ++-- pom.xml | 2 +- 11 files changed, 1112 insertions(+), 2124 deletions(-) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 8df65487c676..b3445ab42423 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -19,13 +19,17 @@ import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.ERROR; import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.LOW_REPLICATION; +import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SIZE; import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SLOW_SYNC; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.WAL_IMPL; +import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; import com.lmax.disruptor.RingBuffer; +import com.lmax.disruptor.Sequence; +import com.lmax.disruptor.Sequencer; import io.opentelemetry.api.trace.Span; import java.io.FileNotFoundException; import java.io.IOException; @@ -33,14 +37,20 @@ import java.lang.management.MemoryType; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.Deque; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.OptionalLong; import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -56,7 +66,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import 
java.util.function.Supplier; import org.apache.commons.lang3.mutable.MutableLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -95,6 +108,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -128,6 +142,9 @@ public abstract class AbstractFSWAL implements WAL { private static final Logger LOG = LoggerFactory.getLogger(AbstractFSWAL.class); + private static final Comparator SEQ_COMPARATOR = + Comparator.comparingLong(SyncFuture::getTxid).thenComparingInt(System::identityHashCode); + private static final String SURVIVED_TOO_LONG_SEC_KEY = "hbase.regionserver.wal.too.old.sec"; private static final int SURVIVED_TOO_LONG_SEC_DEFAULT = 900; /** Don't log blocking regions more frequently than this. */ @@ -157,6 +174,9 @@ public abstract class AbstractFSWAL implements WAL { public static final String WAL_SHUTDOWN_WAIT_TIMEOUT_MS = "hbase.wal.shutdown.wait.timeout.ms"; public static final int DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS = 15 * 1000; + public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; + public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; + /** * file system instance */ @@ -368,6 +388,57 @@ private static final class WALProps { private final int archiveRetries; + protected ExecutorService consumeExecutor; + + private final Lock consumeLock = new ReentrantLock(); + + protected final Runnable consumer = this::consume; + + // check if there is already a consumer task in the event loop's task queue + protected Supplier hasConsumerTask; + + private static final int MAX_EPOCH = 0x3FFFFFFF; + // the lowest bit is waitingRoll, which means new writer is created and we are waiting for old + // writer to be closed. + // the second lowest bit is writerBroken which means the current writer is broken and rollWriter + // is needed. + // all other bits are the epoch number of the current writer, this is used to detect whether the + // writer is still the one when you issue the sync. + // notice that, modification to this field is only allowed under the protection of consumeLock. 
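+  // For example: with epoch 5, a broken writer and no roll pending, epochAndState = (5 << 2) | 0b10 = 22,
+  // so epoch(22) == 5, writerBroken(22) == true and waitingRoll(22) == false (see the static helpers
+  // defined later in this class).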
+ private volatile int epochAndState; + + private boolean readyForRolling; + + private final Condition readyForRollingCond = consumeLock.newCondition(); + + private final RingBuffer waitingConsumePayloads; + + private final Sequence waitingConsumePayloadsGatingSequence; + + private final AtomicBoolean consumerScheduled = new AtomicBoolean(false); + + private final long batchSize; + + protected final Deque toWriteAppends = new ArrayDeque<>(); + + protected final Deque unackedAppends = new ArrayDeque<>(); + + protected final SortedSet syncFutures = new TreeSet<>(SEQ_COMPARATOR); + + // the highest txid of WAL entries being processed + protected long highestProcessedAppendTxid; + + // file length when we issue last sync request on the writer + private long fileLengthAtLastSync; + + private long highestProcessedAppendTxidAtLastSync; + + private int waitOnShutdownInSeconds; + + private String waitOnShutdownInSecondsConfigKey; + + protected boolean shouldShutDownConsumeExecutorWhenClose = true; + public long getFilenum() { return this.filenum.get(); } @@ -414,6 +485,23 @@ protected final int getPreallocatedEventCount() { return floor << 1; } + protected final void setWaitOnShutdownInSeconds(int waitOnShutdownInSeconds, + String waitOnShutdownInSecondsConfigKey) { + this.waitOnShutdownInSeconds = waitOnShutdownInSeconds; + this.waitOnShutdownInSecondsConfigKey = waitOnShutdownInSecondsConfigKey; + } + + protected final void createSingleThreadPoolConsumeExecutor(String walType, final Path rootDir, + final String prefix) { + ThreadPoolExecutor threadPool = + new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), + new ThreadFactoryBuilder().setNameFormat(walType + "-%d-" + rootDir.toString() + "-prefix:" + + (prefix == null ? "default" : prefix).replace("%", "%%")).setDaemon(true).build()); + hasConsumerTask = () -> threadPool.getQueue().peek() == consumer; + consumeExecutor = threadPool; + this.shouldShutDownConsumeExecutorWhenClose = true; + } + protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) @@ -527,6 +615,19 @@ public boolean accept(final Path fileName) { archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); this.walShutdownTimeout = conf.getLong(WAL_SHUTDOWN_WAIT_TIMEOUT_MS, DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS); + + int preallocatedEventCount = + conf.getInt("hbase.regionserver.wal.disruptor.event.count", 1024 * 16); + waitingConsumePayloads = + RingBuffer.createMultiProducer(RingBufferTruck::new, preallocatedEventCount); + waitingConsumePayloadsGatingSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); + waitingConsumePayloads.addGatingSequences(waitingConsumePayloadsGatingSequence); + + // inrease the ringbuffer sequence so our txid is start from 1 + waitingConsumePayloads.publish(waitingConsumePayloads.next()); + waitingConsumePayloadsGatingSequence.set(waitingConsumePayloads.getCursor()); + + batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE); } /** @@ -615,10 +716,6 @@ public final void sync(long txid, boolean forceSync) throws IOException { TraceUtil.trace(() -> doSync(txid, forceSync), () -> createSpan("WAL.sync")); } - protected abstract void doSync(boolean forceSync) throws IOException; - - protected abstract void doSync(long txid, boolean forceSync) throws IOException; - /** * This is a convenience method that computes a new 
filename with a given file-number. * @param filenum to use @@ -735,7 +832,7 @@ Map> findRegionsToForceFlush() throws IOException { /** * Mark this WAL file as closed and call cleanOldLogs to see if we can archive this file. */ - protected final void markClosedAndClean(Path path) { + private void markClosedAndClean(Path path) { WALProps props = walFile2Props.get(path); // typically this should not be null, but if there is no big issue if it is already null, so // let's make the code more robust @@ -1305,6 +1402,432 @@ public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws () -> createSpan("WAL.appendMarker")); } + /** + * Helper that marks the future as DONE and offers it back to the cache. + */ + protected void markFutureDoneAndOffer(SyncFuture future, long txid, Throwable t) { + future.done(txid, t); + syncFutureCache.offer(future); + } + + private static boolean waitingRoll(int epochAndState) { + return (epochAndState & 1) != 0; + } + + private static boolean writerBroken(int epochAndState) { + return ((epochAndState >>> 1) & 1) != 0; + } + + private static int epoch(int epochAndState) { + return epochAndState >>> 2; + } + + // return whether we have successfully set readyForRolling to true. + private boolean trySetReadyForRolling() { + // Check without holding lock first. Usually we will just return here. + // waitingRoll is volatile and unacedEntries is only accessed inside event loop so it is safe to + // check them outside the consumeLock. + if (!waitingRoll(epochAndState) || !unackedAppends.isEmpty()) { + return false; + } + consumeLock.lock(); + try { + // 1. a roll is requested + // 2. all out-going entries have been acked(we have confirmed above). + if (waitingRoll(epochAndState)) { + readyForRolling = true; + readyForRollingCond.signalAll(); + return true; + } else { + return false; + } + } finally { + consumeLock.unlock(); + } + } + + private void syncFailed(long epochWhenSync, Throwable error) { + LOG.warn("sync failed", error); + this.onException(epochWhenSync, error); + } + + private void onException(long epochWhenSync, Throwable error) { + boolean shouldRequestLogRoll = true; + consumeLock.lock(); + try { + int currentEpochAndState = epochAndState; + if (epoch(currentEpochAndState) != epochWhenSync || writerBroken(currentEpochAndState)) { + // this is not the previous writer which means we have already rolled the writer. + // or this is still the current writer, but we have already marked it as broken and request + // a roll. + return; + } + this.epochAndState = currentEpochAndState | 0b10; + if (waitingRoll(currentEpochAndState)) { + readyForRolling = true; + readyForRollingCond.signalAll(); + // this means we have already in the middle of a rollWriter so just tell the roller thread + // that you can continue without requesting an extra log roll. + shouldRequestLogRoll = false; + } + } finally { + consumeLock.unlock(); + } + for (Iterator iter = unackedAppends.descendingIterator(); iter.hasNext();) { + toWriteAppends.addFirst(iter.next()); + } + highestUnsyncedTxid = highestSyncedTxid.get(); + if (shouldRequestLogRoll) { + // request a roll. + requestLogRoll(ERROR); + } + } + + private void syncCompleted(long epochWhenSync, W writer, long processedTxid, long startTimeNs) { + // Please see the last several comments on HBASE-22761, it is possible that we get a + // syncCompleted which acks a previous sync request after we received a syncFailed on the same + // writer. 
So here we will also check on the epoch and state, if the epoch has already been + // changed, i.e, we have already rolled the writer, or the writer is already broken, we should + // just skip here, to avoid mess up the state or accidentally release some WAL entries and + // cause data corruption. + // The syncCompleted call is on the critical write path so we should try our best to make it + // fast. So here we do not hold consumeLock, for increasing performance. It is safe because + // there are only 3 possible situations: + // 1. For normal case, the only place where we change epochAndState is when rolling the writer. + // Before rolling actually happen, we will only change the state to waitingRoll which is another + // bit than writerBroken, and when we actually change the epoch, we can make sure that there is + // no out going sync request. So we will always pass the check here and there is no problem. + // 2. The writer is broken, but we have not called syncFailed yet. In this case, since + // syncFailed and syncCompleted are executed in the same thread, we will just face the same + // situation with #1. + // 3. The writer is broken, and syncFailed has been called. Then when we arrive here, there are + // only 2 possible situations: + // a. we arrive before we actually roll the writer, then we will find out the writer is broken + // and give up. + // b. we arrive after we actually roll the writer, then we will find out the epoch is changed + // and give up. + // For both #a and #b, we do not need to hold the consumeLock as we will always update the + // epochAndState as a whole. + // So in general, for all the cases above, we do not need to hold the consumeLock. + int epochAndState = this.epochAndState; + if (epoch(epochAndState) != epochWhenSync || writerBroken(epochAndState)) { + LOG.warn("Got a sync complete call after the writer is broken, skip"); + return; + } + + if (processedTxid < highestSyncedTxid.get()) { + return; + } + highestSyncedTxid.set(processedTxid); + for (Iterator iter = unackedAppends.iterator(); iter.hasNext();) { + FSWALEntry entry = iter.next(); + if (entry.getTxid() <= processedTxid) { + entry.release(); + iter.remove(); + } else { + break; + } + } + postSync(System.nanoTime() - startTimeNs, finishSync()); + /** + * This method is used to be compatible with the original logic of {@link FSHLog}. + */ + checkSlowSyncCount(); + if (trySetReadyForRolling()) { + // we have just finished a roll, then do not need to check for log rolling, the writer will be + // closed soon. + return; + } + // If we haven't already requested a roll, check if we have exceeded logrollsize + if (!isLogRollRequested() && writer.getLength() > logrollsize) { + if (LOG.isDebugEnabled()) { + LOG.debug("Requesting log roll because of file size threshold; length=" + writer.getLength() + + ", logrollsize=" + logrollsize); + } + requestLogRoll(SIZE); + } + } + + // find all the sync futures between these two txids to see if we need to issue a hsync, if no + // sync futures then just use the default one. 
+ private boolean isHsync(long beginTxid, long endTxid) { + SortedSet futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), + new SyncFuture().reset(endTxid + 1, false)); + if (futures.isEmpty()) { + return useHsync; + } + for (SyncFuture future : futures) { + if (future.isForceSync()) { + return true; + } + } + return false; + } + + private void sync(W writer) { + fileLengthAtLastSync = writer.getLength(); + long currentHighestProcessedAppendTxid = highestProcessedAppendTxid; + boolean shouldUseHsync = + isHsync(highestProcessedAppendTxidAtLastSync, currentHighestProcessedAppendTxid); + highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid; + final long startTimeNs = System.nanoTime(); + final long epoch = (long) epochAndState >>> 2L; + addListener(doWriterSync(writer, shouldUseHsync, currentHighestProcessedAppendTxid), + (result, error) -> { + if (error != null) { + syncFailed(epoch, error); + } else { + long syncedTxid = getSyncedTxid(currentHighestProcessedAppendTxid, result); + syncCompleted(epoch, writer, syncedTxid, startTimeNs); + } + }, consumeExecutor); + } + + /** + * This method is to adapt {@link FSHLog} and {@link AsyncFSWAL}. For {@link AsyncFSWAL}, we use + * {@link AbstractFSWAL#highestProcessedAppendTxid} at the point we calling + * {@link AsyncFSWAL#doWriterSync} method as successful syncedTxid. For {@link FSHLog}, because we + * use multi-thread {@code SyncRunner}s, we used the result of {@link CompletableFuture} as + * successful syncedTxid. + */ + protected long getSyncedTxid(long processedTxid, long completableFutureResult) { + return processedTxid; + } + + protected abstract CompletableFuture doWriterSync(W writer, boolean shouldUseHsync, + long txidWhenSyn); + + private int finishSyncLowerThanTxid(long txid) { + int finished = 0; + for (Iterator iter = syncFutures.iterator(); iter.hasNext();) { + SyncFuture sync = iter.next(); + if (sync.getTxid() <= txid) { + markFutureDoneAndOffer(sync, txid, null); + iter.remove(); + finished++; + } else { + break; + } + } + return finished; + } + + // try advancing the highestSyncedTxid as much as possible + private int finishSync() { + if (unackedAppends.isEmpty()) { + // All outstanding appends have been acked. + if (toWriteAppends.isEmpty()) { + // Also no appends that wait to be written out, then just finished all pending syncs. + long maxSyncTxid = highestSyncedTxid.get(); + for (SyncFuture sync : syncFutures) { + maxSyncTxid = Math.max(maxSyncTxid, sync.getTxid()); + markFutureDoneAndOffer(sync, maxSyncTxid, null); + } + highestSyncedTxid.set(maxSyncTxid); + int finished = syncFutures.size(); + syncFutures.clear(); + return finished; + } else { + // There is no append between highestProcessedAppendTxid and lowestUnprocessedAppendTxid, so + // if highestSyncedTxid >= highestProcessedAppendTxid, then all syncs whose txid are between + // highestProcessedAppendTxid and lowestUnprocessedAppendTxid can be finished. + long lowestUnprocessedAppendTxid = toWriteAppends.peek().getTxid(); + assert lowestUnprocessedAppendTxid > highestProcessedAppendTxid; + long doneTxid = lowestUnprocessedAppendTxid - 1; + highestSyncedTxid.set(doneTxid); + return finishSyncLowerThanTxid(doneTxid); + } + } else { + // There are still unacked appends. So let's move the highestSyncedTxid to the txid of the + // first unacked append minus 1. 
+ long lowestUnackedAppendTxid = unackedAppends.peek().getTxid(); + long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid.get()); + highestSyncedTxid.set(doneTxid); + return finishSyncLowerThanTxid(doneTxid); + } + } + + // confirm non-empty before calling + private static long getLastTxid(Deque queue) { + return queue.peekLast().getTxid(); + } + + private void appendAndSync() throws IOException { + final W writer = this.writer; + // maybe a sync request is not queued when we issue a sync, so check here to see if we could + // finish some. + finishSync(); + long newHighestProcessedAppendTxid = -1L; + // this is used to avoid calling peedLast every time on unackedAppends, appendAndAsync is single + // threaded, this could save us some cycles + boolean addedToUnackedAppends = false; + for (Iterator iter = toWriteAppends.iterator(); iter.hasNext();) { + FSWALEntry entry = iter.next(); + /** + * For {@link FSHog},here may throws IOException,but for {@link AsyncFSWAL}, here would not + * throw any IOException. + */ + boolean appended = appendEntry(writer, entry); + newHighestProcessedAppendTxid = entry.getTxid(); + iter.remove(); + if (appended) { + // This is possible, when we fail to sync, we will add the unackedAppends back to + // toWriteAppends, so here we may get an entry which is already in the unackedAppends. + if ( + addedToUnackedAppends || unackedAppends.isEmpty() + || getLastTxid(unackedAppends) < entry.getTxid() + ) { + unackedAppends.addLast(entry); + addedToUnackedAppends = true; + } + // See HBASE-25905, here we need to make sure that, we will always write all the entries in + // unackedAppends out. As the code in the consume method will assume that, the entries in + // unackedAppends have all been sent out so if there is roll request and unackedAppends is + // not empty, we could just return as later there will be a syncCompleted call to clear the + // unackedAppends, or a syncFailed to lead us to another state. + // There could be other ways to fix, such as changing the logic in the consume method, but + // it will break the assumption and then (may) lead to a big refactoring. So here let's use + // this way to fix first, can optimize later. + if ( + writer.getLength() - fileLengthAtLastSync >= batchSize + && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends)) + ) { + break; + } + } + } + // if we have a newer transaction id, update it. + // otherwise, use the previous transaction id. + if (newHighestProcessedAppendTxid > 0) { + highestProcessedAppendTxid = newHighestProcessedAppendTxid; + } else { + newHighestProcessedAppendTxid = highestProcessedAppendTxid; + } + + if (writer.getLength() - fileLengthAtLastSync >= batchSize) { + // sync because buffer size limit. + sync(writer); + return; + } + if (writer.getLength() == fileLengthAtLastSync) { + // we haven't written anything out, just advance the highestSyncedSequence since we may only + // stamped some region sequence id. + if (unackedAppends.isEmpty()) { + highestSyncedTxid.set(highestProcessedAppendTxid); + finishSync(); + trySetReadyForRolling(); + } + return; + } + // reach here means that we have some unsynced data but haven't reached the batch size yet + // but we will not issue a sync directly here even if there are sync requests because we may + // have some new data in the ringbuffer, so let's just return here and delay the decision of + // whether to issue a sync in the caller method. 
+ } + + private void consume() { + consumeLock.lock(); + try { + int currentEpochAndState = epochAndState; + if (writerBroken(currentEpochAndState)) { + return; + } + if (waitingRoll(currentEpochAndState)) { + if (writer.getLength() > fileLengthAtLastSync) { + // issue a sync + sync(writer); + } else { + if (unackedAppends.isEmpty()) { + readyForRolling = true; + readyForRollingCond.signalAll(); + } + } + return; + } + } finally { + consumeLock.unlock(); + } + long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { + if (!waitingConsumePayloads.isPublished(nextCursor)) { + break; + } + RingBufferTruck truck = waitingConsumePayloads.get(nextCursor); + switch (truck.type()) { + case APPEND: + toWriteAppends.addLast(truck.unloadAppend()); + break; + case SYNC: + syncFutures.add(truck.unloadSync()); + break; + default: + LOG.warn("RingBufferTruck with unexpected type: " + truck.type()); + break; + } + waitingConsumePayloadsGatingSequence.set(nextCursor); + } + + /** + * This method is used to be compatible with the original logic of {@link AsyncFSWAL}. + */ + preAppendAndSync(); + try { + appendAndSync(); + } catch (IOException exception) { + /** + * For {@link FSHog},here may catch IOException,but for {@link AsyncFSWAL}, the code doesn't + * go in here. + */ + LOG.error("appendAndSync throws IOException.", exception); + onAppendEntryFailed(exception); + return; + } + if (hasConsumerTask.get()) { + return; + } + if (toWriteAppends.isEmpty()) { + if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { + consumerScheduled.set(false); + // recheck here since in append and sync we do not hold the consumeLock. Thing may + // happen like + // 1. we check cursor, no new entry + // 2. someone publishes a new entry to ringbuffer and the consumerScheduled is true and + // give up scheduling the consumer task. + // 3. we set consumerScheduled to false and also give up scheduling consumer task. + if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { + // we will give up consuming so if there are some unsynced data we need to issue a sync. + if ( + writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() + && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync + ) { + // no new data in the ringbuffer and we have at least one sync request + sync(writer); + } + return; + } else { + // maybe someone has grabbed this before us + if (!consumerScheduled.compareAndSet(false, true)) { + return; + } + } + } + } + // reschedule if we still have something to write. + consumeExecutor.execute(consumer); + } + + protected void preAppendAndSync() { + } + + private boolean shouldScheduleConsumer() { + int currentEpochAndState = epochAndState; + if (writerBroken(currentEpochAndState) || waitingRoll(currentEpochAndState)) { + return false; + } + return consumerScheduled.compareAndSet(false, true); + } + /** * Append a set of edits to the WAL. *

@@ -1322,7 +1845,7 @@ public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws * passed in WALKey walKey parameter. Be warned that the WriteEntry is not * immediately available on return from this method. It WILL be available subsequent to a sync of * this append; otherwise, you will just have to wait on the WriteEntry to get filled in. - * @param info the regioninfo associated with append + * @param hri the regioninfo associated with append * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit * sequence id that is after all currently appended edits. @@ -1335,14 +1858,95 @@ public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. */ - protected abstract long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException; + protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore) + throws IOException { + precheckBeforeAppendWALEdit(hri, key, edits, inMemstore); + long txid = + stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); + if (shouldScheduleConsumer()) { + consumeExecutor.execute(consumer); + } + return txid; + } - protected abstract void doAppend(W writer, FSWALEntry entry) throws IOException; + protected void precheckBeforeAppendWALEdit(RegionInfo hri, WALKeyImpl key, WALEdit edits, + boolean inMemstore) throws IOException { + } + + protected void doSync(boolean forceSync) throws IOException { + long txid = waitingConsumePayloads.next(); + SyncFuture future; + try { + future = getSyncFuture(txid, forceSync); + RingBufferTruck truck = waitingConsumePayloads.get(txid); + truck.load(future); + } finally { + waitingConsumePayloads.publish(txid); + } + if (shouldScheduleConsumer()) { + consumeExecutor.execute(consumer); + } + blockOnSync(future); + } + + protected void doSync(long txid, boolean forceSync) throws IOException { + if (highestSyncedTxid.get() >= txid) { + return; + } + // here we do not use ring buffer sequence as txid + long sequence = waitingConsumePayloads.next(); + SyncFuture future; + try { + future = getSyncFuture(txid, forceSync); + RingBufferTruck truck = waitingConsumePayloads.get(sequence); + truck.load(future); + } finally { + waitingConsumePayloads.publish(sequence); + } + if (shouldScheduleConsumer()) { + consumeExecutor.execute(consumer); + } + blockOnSync(future); + } protected abstract W createWriterInstance(Path path) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + protected final void waitForSafePoint() { + consumeLock.lock(); + try { + int currentEpochAndState = epochAndState; + if (writerBroken(currentEpochAndState) || this.writer == null) { + return; + } + consumerScheduled.set(true); + epochAndState = currentEpochAndState | 1; + readyForRolling = false; + consumeExecutor.execute(consumer); + while (!readyForRolling) { + readyForRollingCond.awaitUninterruptibly(); + } + } finally { + consumeLock.unlock(); + } + } + + protected final void closeWriter(W writer, Path path) { + inflightWALClosures.put(path.getName(), writer); + closeExecutor.execute(() -> { + try { + writer.close(); + } catch (IOException e) { + LOG.warn("close old writer failed", e); + } finally { + // call this even if the above close fails, as there is no other chance we can set closed to + // true, it will 
not cause big problems. + markClosedAndClean(path); + inflightWALClosures.remove(path.getName()); + } + }); + } + /** * Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer * will begin to work before returning from this method. If we clear the flag after returning from @@ -1350,13 +1954,127 @@ protected abstract W createWriterInstance(Path path) * clear the {@link #rollRequested} flag so we do not miss a roll request, typically before you * start writing to the new writer. */ - protected abstract void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) - throws IOException; + protected void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException { + Preconditions.checkNotNull(nextWriter); + waitForSafePoint(); + /** + * For {@link FSHLog},here would shutdown {@link FSHLog.SyncRunner}. + */ + doCleanUpResources(); + // we will call rollWriter in init method, where we want to create the first writer and + // obviously the previous writer is null, so here we need this null check. And why we must call + // logRollAndSetupWalProps before closeWriter is that, we will call markClosedAndClean after + // closing the writer asynchronously, we need to make sure the WALProps is put into + // walFile2Props before we call markClosedAndClean + if (writer != null) { + long oldFileLen = writer.getLength(); + logRollAndSetupWalProps(oldPath, newPath, oldFileLen); + closeWriter(writer, oldPath); + } else { + logRollAndSetupWalProps(oldPath, newPath, 0); + } + this.writer = nextWriter; + /** + * Here is used for {@link AsyncFSWAL} and {@link FSHLog} to set the under layer filesystem + * output after writer is replaced. + */ + onWriterReplaced(nextWriter); + this.fileLengthAtLastSync = nextWriter.getLength(); + this.highestProcessedAppendTxidAtLastSync = 0L; + consumeLock.lock(); + try { + consumerScheduled.set(true); + int currentEpoch = epochAndState >>> 2; + int nextEpoch = currentEpoch == MAX_EPOCH ? 0 : currentEpoch + 1; + // set a new epoch and also clear waitingRoll and writerBroken + this.epochAndState = nextEpoch << 2; + // Reset rollRequested status + rollRequested.set(false); + consumeExecutor.execute(consumer); + } finally { + consumeLock.unlock(); + } + } - protected abstract void doShutdown() throws IOException; + protected abstract void onWriterReplaced(W nextWriter); + + protected void doShutdown() throws IOException { + waitForSafePoint(); + /** + * For {@link FSHLog},here would shutdown {@link FSHLog.SyncRunner}. + */ + doCleanUpResources(); + if (this.writer != null) { + closeWriter(this.writer, getOldPath()); + this.writer = null; + } + closeExecutor.shutdown(); + try { + if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { + LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + + " the close of async writer doesn't complete." 
+ + "Please check the status of underlying filesystem" + + " or increase the wait time by the config \"" + this.waitOnShutdownInSecondsConfigKey + + "\""); + } + } catch (InterruptedException e) { + LOG.error("The wait for close of async writer is interrupted"); + Thread.currentThread().interrupt(); + } + IOException error = new IOException("WAL has been closed"); + long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; + // drain all the pending sync requests + for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor + <= cursorBound; nextCursor++) { + if (!waitingConsumePayloads.isPublished(nextCursor)) { + break; + } + RingBufferTruck truck = waitingConsumePayloads.get(nextCursor); + switch (truck.type()) { + case SYNC: + syncFutures.add(truck.unloadSync()); + break; + default: + break; + } + } + // and fail them + syncFutures.forEach(f -> markFutureDoneAndOffer(f, f.getTxid(), error)); + if (this.shouldShutDownConsumeExecutorWhenClose) { + consumeExecutor.shutdown(); + } + } + + protected void doCleanUpResources() { + }; + + protected abstract void doAppend(W writer, FSWALEntry entry) throws IOException; + + /** + * This method gets the pipeline for the current WAL. + */ + abstract DatanodeInfo[] getPipeline(); + + /** + * This method gets the datanode replication count for the current WAL. + */ + abstract int getLogReplication(); protected abstract boolean doCheckLogLowReplication(); + protected boolean isWriterBroken() { + return writerBroken(epochAndState); + } + + private void onAppendEntryFailed(IOException exception) { + LOG.warn("append entry failed", exception); + final long currentEpoch = (long) epochAndState >>> 2L; + this.onException(currentEpoch, exception); + } + + protected void checkSlowSyncCount() { + } + /** Returns true if we exceeded the slow sync roll threshold over the last check interval */ protected boolean doCheckSlowSync() { boolean result = false; @@ -1407,16 +2125,6 @@ public void checkLogLowReplication(long checkInterval) { } } - /** - * This method gets the pipeline for the current WAL. - */ - abstract DatanodeInfo[] getPipeline(); - - /** - * This method gets the datanode replication count for the current WAL. 
- */ - abstract int getLogReplication(); - private static void split(final Configuration conf, final Path p) throws IOException { FileSystem fs = CommonFSUtils.getWALFileSystem(conf); if (!fs.exists(p)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 9be86077b525..5de9d4d6b8d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -17,32 +17,12 @@ */ package org.apache.hadoop.hbase.regionserver.wal; -import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.ERROR; -import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SIZE; -import static org.apache.hadoop.hbase.util.FutureUtils.addListener; - -import com.lmax.disruptor.RingBuffer; -import com.lmax.disruptor.Sequence; -import com.lmax.disruptor.Sequencer; import java.io.IOException; import java.lang.reflect.Field; -import java.util.ArrayDeque; -import java.util.Comparator; -import java.util.Deque; import java.util.Iterator; import java.util.List; import java.util.Queue; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Supplier; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -60,10 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; -import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; @@ -129,9 +106,6 @@ public class AsyncFSWAL extends AbstractFSWAL { private static final Logger LOG = LoggerFactory.getLogger(AsyncFSWAL.class); - private static final Comparator SEQ_COMPARATOR = - Comparator.comparingLong(SyncFuture::getTxid).thenComparingInt(System::identityHashCode); - public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; @@ -145,57 +119,10 @@ public class AsyncFSWAL extends AbstractFSWAL { private final EventLoopGroup eventLoopGroup; - private final ExecutorService consumeExecutor; - private final Class channelClass; - private final Lock consumeLock = new ReentrantLock(); - - private final Runnable consumer = this::consume; - - // check if there is already a consumer task in the event loop's task queue - private final Supplier hasConsumerTask; - - private static final int MAX_EPOCH = 0x3FFFFFFF; - // the lowest bit is waitingRoll, which means new writer is created and we are waiting for old - // writer to be closed. 
- // the second lowest bit is writerBroken which means the current writer is broken and rollWriter - // is needed. - // all other bits are the epoch number of the current writer, this is used to detect whether the - // writer is still the one when you issue the sync. - // notice that, modification to this field is only allowed under the protection of consumeLock. - private volatile int epochAndState; - - private boolean readyForRolling; - - private final Condition readyForRollingCond = consumeLock.newCondition(); - - private final RingBuffer waitingConsumePayloads; - - private final Sequence waitingConsumePayloadsGatingSequence; - - private final AtomicBoolean consumerScheduled = new AtomicBoolean(false); - - private final long batchSize; - private volatile AsyncFSOutput fsOut; - private final Deque toWriteAppends = new ArrayDeque<>(); - - private final Deque unackedAppends = new ArrayDeque<>(); - - private final SortedSet syncFutures = new TreeSet<>(SEQ_COMPARATOR); - - // the highest txid of WAL entries being processed - private long highestProcessedAppendTxid; - - // file length when we issue last sync request on the writer - private long fileLengthAtLastSync; - - private long highestProcessedAppendTxidAtLastSync; - - private final int waitOnShutdownInSeconds; - private final StreamSlowMonitor streamSlowMonitor; public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, @@ -216,345 +143,35 @@ public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDi this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; this.streamSlowMonitor = monitor; - Supplier hasConsumerTask; if (conf.getBoolean(ASYNC_WAL_USE_SHARED_EVENT_LOOP, DEFAULT_ASYNC_WAL_USE_SHARED_EVENT_LOOP)) { this.consumeExecutor = eventLoopGroup.next(); + this.shouldShutDownConsumeExecutorWhenClose = false; if (consumeExecutor instanceof SingleThreadEventExecutor) { try { Field field = SingleThreadEventExecutor.class.getDeclaredField("taskQueue"); field.setAccessible(true); Queue queue = (Queue) field.get(consumeExecutor); - hasConsumerTask = () -> queue.peek() == consumer; + this.hasConsumerTask = () -> queue.peek() == consumer; } catch (Exception e) { LOG.warn("Can not get task queue of " + consumeExecutor + ", this is not necessary, just give up", e); - hasConsumerTask = () -> false; - } - } else { - hasConsumerTask = () -> false; - } - } else { - ThreadPoolExecutor threadPool = - new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setNameFormat("AsyncFSWAL-%d-" + rootDir.toString() - + "-prefix:" + (prefix == null ? 
"default" : prefix).replace("%", "%%")).setDaemon(true) - .build()); - hasConsumerTask = () -> threadPool.getQueue().peek() == consumer; - this.consumeExecutor = threadPool; - } - - this.hasConsumerTask = hasConsumerTask; - int preallocatedEventCount = - conf.getInt("hbase.regionserver.wal.disruptor.event.count", 1024 * 16); - waitingConsumePayloads = - RingBuffer.createMultiProducer(RingBufferTruck::new, preallocatedEventCount); - waitingConsumePayloadsGatingSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); - waitingConsumePayloads.addGatingSequences(waitingConsumePayloadsGatingSequence); - - // inrease the ringbuffer sequence so our txid is start from 1 - waitingConsumePayloads.publish(waitingConsumePayloads.next()); - waitingConsumePayloadsGatingSequence.set(waitingConsumePayloads.getCursor()); - - batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE); - waitOnShutdownInSeconds = conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS, - DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS); - } - - /** - * Helper that marks the future as DONE and offers it back to the cache. - */ - private void markFutureDoneAndOffer(SyncFuture future, long txid, Throwable t) { - future.done(txid, t); - syncFutureCache.offer(future); - } - - private static boolean waitingRoll(int epochAndState) { - return (epochAndState & 1) != 0; - } - - private static boolean writerBroken(int epochAndState) { - return ((epochAndState >>> 1) & 1) != 0; - } - - private static int epoch(int epochAndState) { - return epochAndState >>> 2; - } - - // return whether we have successfully set readyForRolling to true. - private boolean trySetReadyForRolling() { - // Check without holding lock first. Usually we will just return here. - // waitingRoll is volatile and unacedEntries is only accessed inside event loop so it is safe to - // check them outside the consumeLock. - if (!waitingRoll(epochAndState) || !unackedAppends.isEmpty()) { - return false; - } - consumeLock.lock(); - try { - // 1. a roll is requested - // 2. all out-going entries have been acked(we have confirmed above). - if (waitingRoll(epochAndState)) { - readyForRolling = true; - readyForRollingCond.signalAll(); - return true; - } else { - return false; - } - } finally { - consumeLock.unlock(); - } - } - - private void syncFailed(long epochWhenSync, Throwable error) { - LOG.warn("sync failed", error); - boolean shouldRequestLogRoll = true; - consumeLock.lock(); - try { - int currentEpochAndState = epochAndState; - if (epoch(currentEpochAndState) != epochWhenSync || writerBroken(currentEpochAndState)) { - // this is not the previous writer which means we have already rolled the writer. - // or this is still the current writer, but we have already marked it as broken and request - // a roll. - return; - } - this.epochAndState = currentEpochAndState | 0b10; - if (waitingRoll(currentEpochAndState)) { - readyForRolling = true; - readyForRollingCond.signalAll(); - // this means we have already in the middle of a rollWriter so just tell the roller thread - // that you can continue without requesting an extra log roll. - shouldRequestLogRoll = false; - } - } finally { - consumeLock.unlock(); - } - for (Iterator iter = unackedAppends.descendingIterator(); iter.hasNext();) { - toWriteAppends.addFirst(iter.next()); - } - highestUnsyncedTxid = highestSyncedTxid.get(); - if (shouldRequestLogRoll) { - // request a roll. 
- requestLogRoll(ERROR); - } - } - - private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processedTxid, - long startTimeNs) { - // Please see the last several comments on HBASE-22761, it is possible that we get a - // syncCompleted which acks a previous sync request after we received a syncFailed on the same - // writer. So here we will also check on the epoch and state, if the epoch has already been - // changed, i.e, we have already rolled the writer, or the writer is already broken, we should - // just skip here, to avoid mess up the state or accidentally release some WAL entries and - // cause data corruption. - // The syncCompleted call is on the critical write path so we should try our best to make it - // fast. So here we do not hold consumeLock, for increasing performance. It is safe because - // there are only 3 possible situations: - // 1. For normal case, the only place where we change epochAndState is when rolling the writer. - // Before rolling actually happen, we will only change the state to waitingRoll which is another - // bit than writerBroken, and when we actually change the epoch, we can make sure that there is - // no out going sync request. So we will always pass the check here and there is no problem. - // 2. The writer is broken, but we have not called syncFailed yet. In this case, since - // syncFailed and syncCompleted are executed in the same thread, we will just face the same - // situation with #1. - // 3. The writer is broken, and syncFailed has been called. Then when we arrive here, there are - // only 2 possible situations: - // a. we arrive before we actually roll the writer, then we will find out the writer is broken - // and give up. - // b. we arrive after we actually roll the writer, then we will find out the epoch is changed - // and give up. - // For both #a and #b, we do not need to hold the consumeLock as we will always update the - // epochAndState as a whole. - // So in general, for all the cases above, we do not need to hold the consumeLock. - int epochAndState = this.epochAndState; - if (epoch(epochAndState) != epochWhenSync || writerBroken(epochAndState)) { - LOG.warn("Got a sync complete call after the writer is broken, skip"); - return; - } - highestSyncedTxid.set(processedTxid); - for (Iterator iter = unackedAppends.iterator(); iter.hasNext();) { - FSWALEntry entry = iter.next(); - if (entry.getTxid() <= processedTxid) { - entry.release(); - iter.remove(); - } else { - break; - } - } - postSync(System.nanoTime() - startTimeNs, finishSync()); - if (trySetReadyForRolling()) { - // we have just finished a roll, then do not need to check for log rolling, the writer will be - // closed soon. - return; - } - // If we haven't already requested a roll, check if we have exceeded logrollsize - if (!isLogRollRequested() && writer.getLength() > logrollsize) { - if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + writer.getLength() - + ", logrollsize=" + logrollsize); - } - requestLogRoll(SIZE); - } - } - - // find all the sync futures between these two txids to see if we need to issue a hsync, if no - // sync futures then just use the default one. 
- private boolean isHsync(long beginTxid, long endTxid) { - SortedSet futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), - new SyncFuture().reset(endTxid + 1, false)); - if (futures.isEmpty()) { - return useHsync; - } - for (SyncFuture future : futures) { - if (future.isForceSync()) { - return true; - } - } - return false; - } - - private void sync(AsyncWriter writer) { - fileLengthAtLastSync = writer.getLength(); - long currentHighestProcessedAppendTxid = highestProcessedAppendTxid; - boolean shouldUseHsync = - isHsync(highestProcessedAppendTxidAtLastSync, currentHighestProcessedAppendTxid); - highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid; - final long startTimeNs = System.nanoTime(); - final long epoch = (long) epochAndState >>> 2L; - addListener(writer.sync(shouldUseHsync), (result, error) -> { - if (error != null) { - syncFailed(epoch, error); - } else { - syncCompleted(epoch, writer, currentHighestProcessedAppendTxid, startTimeNs); - } - }, consumeExecutor); - } - - private int finishSyncLowerThanTxid(long txid) { - int finished = 0; - for (Iterator iter = syncFutures.iterator(); iter.hasNext();) { - SyncFuture sync = iter.next(); - if (sync.getTxid() <= txid) { - markFutureDoneAndOffer(sync, txid, null); - iter.remove(); - finished++; - } else { - break; - } - } - return finished; - } - - // try advancing the highestSyncedTxid as much as possible - private int finishSync() { - if (unackedAppends.isEmpty()) { - // All outstanding appends have been acked. - if (toWriteAppends.isEmpty()) { - // Also no appends that wait to be written out, then just finished all pending syncs. - long maxSyncTxid = highestSyncedTxid.get(); - for (SyncFuture sync : syncFutures) { - maxSyncTxid = Math.max(maxSyncTxid, sync.getTxid()); - markFutureDoneAndOffer(sync, maxSyncTxid, null); + this.hasConsumerTask = () -> false; } - highestSyncedTxid.set(maxSyncTxid); - int finished = syncFutures.size(); - syncFutures.clear(); - return finished; } else { - // There is no append between highestProcessedAppendTxid and lowestUnprocessedAppendTxid, so - // if highestSyncedTxid >= highestProcessedAppendTxid, then all syncs whose txid are between - // highestProcessedAppendTxid and lowestUnprocessedAppendTxid can be finished. - long lowestUnprocessedAppendTxid = toWriteAppends.peek().getTxid(); - assert lowestUnprocessedAppendTxid > highestProcessedAppendTxid; - long doneTxid = lowestUnprocessedAppendTxid - 1; - highestSyncedTxid.set(doneTxid); - return finishSyncLowerThanTxid(doneTxid); + this.hasConsumerTask = () -> false; } } else { - // There are still unacked appends. So let's move the highestSyncedTxid to the txid of the - // first unacked append minus 1. - long lowestUnackedAppendTxid = unackedAppends.peek().getTxid(); - long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid.get()); - highestSyncedTxid.set(doneTxid); - return finishSyncLowerThanTxid(doneTxid); + this.createSingleThreadPoolConsumeExecutor("AsyncFSWAL", rootDir, prefix); } - } - // confirm non-empty before calling - private static long getLastTxid(Deque queue) { - return queue.peekLast().getTxid(); + this.setWaitOnShutdownInSeconds(conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS, + DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS), ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS); } - private void appendAndSync() { - final AsyncWriter writer = this.writer; - // maybe a sync request is not queued when we issue a sync, so check here to see if we could - // finish some. 
- finishSync(); - long newHighestProcessedAppendTxid = -1L; - // this is used to avoid calling peedLast every time on unackedAppends, appendAndAsync is single - // threaded, this could save us some cycles - boolean addedToUnackedAppends = false; - for (Iterator iter = toWriteAppends.iterator(); iter.hasNext();) { - FSWALEntry entry = iter.next(); - boolean appended; - try { - appended = appendEntry(writer, entry); - } catch (IOException e) { - throw new AssertionError("should not happen", e); - } - newHighestProcessedAppendTxid = entry.getTxid(); - iter.remove(); - if (appended) { - // This is possible, when we fail to sync, we will add the unackedAppends back to - // toWriteAppends, so here we may get an entry which is already in the unackedAppends. - if ( - addedToUnackedAppends || unackedAppends.isEmpty() - || getLastTxid(unackedAppends) < entry.getTxid() - ) { - unackedAppends.addLast(entry); - addedToUnackedAppends = true; - } - // See HBASE-25905, here we need to make sure that, we will always write all the entries in - // unackedAppends out. As the code in the consume method will assume that, the entries in - // unackedAppends have all been sent out so if there is roll request and unackedAppends is - // not empty, we could just return as later there will be a syncCompleted call to clear the - // unackedAppends, or a syncFailed to lead us to another state. - // There could be other ways to fix, such as changing the logic in the consume method, but - // it will break the assumption and then (may) lead to a big refactoring. So here let's use - // this way to fix first, can optimize later. - if ( - writer.getLength() - fileLengthAtLastSync >= batchSize - && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends)) - ) { - break; - } - } - } - // if we have a newer transaction id, update it. - // otherwise, use the previous transaction id. - if (newHighestProcessedAppendTxid > 0) { - highestProcessedAppendTxid = newHighestProcessedAppendTxid; - } else { - newHighestProcessedAppendTxid = highestProcessedAppendTxid; - } - - if (writer.getLength() - fileLengthAtLastSync >= batchSize) { - // sync because buffer size limit. - sync(writer); - return; - } - if (writer.getLength() == fileLengthAtLastSync) { - // we haven't written anything out, just advance the highestSyncedSequence since we may only - // stamped some region sequence id. - if (unackedAppends.isEmpty()) { - highestSyncedTxid.set(highestProcessedAppendTxid); - finishSync(); - trySetReadyForRolling(); - } - return; - } - // reach here means that we have some unsynced data but haven't reached the batch size yet - // but we will not issue a sync directly here even if there are sync requests because we may - // have some new data in the ringbuffer, so let's just return here and delay the decision of - // whether to issue a sync in the caller method. 
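
The removed comments above amount to a three-way rule: sync immediately once the unsynced bytes reach the batch size, only advance highestSyncedTxid when nothing has been written since the last sync and there are no unacked appends, and otherwise defer the decision to the caller. A minimal sketch of that rule as a pure function follows; it is illustrative only and not part of this patch, and SyncBatchingSketch, decide and hasUnackedAppends are hypothetical names.

public class SyncBatchingSketch {

  enum SyncDecision { SYNC_NOW, ADVANCE_SYNCED_TXID_ONLY, DEFER_TO_CALLER }

  // writerLength, fileLengthAtLastSync and batchSize mirror the fields consulted by the
  // consume loop; hasUnackedAppends stands in for !unackedAppends.isEmpty().
  static SyncDecision decide(long writerLength, long fileLengthAtLastSync, long batchSize,
      boolean hasUnackedAppends) {
    long unsyncedBytes = writerLength - fileLengthAtLastSync;
    if (unsyncedBytes >= batchSize) {
      // Enough buffered data: issue the sync right away.
      return SyncDecision.SYNC_NOW;
    }
    if (unsyncedBytes == 0 && !hasUnackedAppends) {
      // Nothing written since the last sync (only sequence ids were stamped), so the
      // highest synced txid can be advanced without touching the writer.
      return SyncDecision.ADVANCE_SYNCED_TXID_ONLY;
    }
    // Some unsynced data below the batch size: newer entries may still arrive in the
    // ring buffer, so leave the decision to the caller.
    return SyncDecision.DEFER_TO_CALLER;
  }
}
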
+ @Override + protected CompletableFuture doWriterSync(AsyncWriter writer, boolean shouldUseHsync, + long txidWhenSyn) { + return writer.sync(shouldUseHsync); } private void drainNonMarkerEditsAndFailSyncs() { @@ -599,92 +216,11 @@ private void drainNonMarkerEditsAndFailSyncs() { } } - private void consume() { - consumeLock.lock(); - try { - int currentEpochAndState = epochAndState; - if (writerBroken(currentEpochAndState)) { - return; - } - if (waitingRoll(currentEpochAndState)) { - if (writer.getLength() > fileLengthAtLastSync) { - // issue a sync - sync(writer); - } else { - if (unackedAppends.isEmpty()) { - readyForRolling = true; - readyForRollingCond.signalAll(); - } - } - return; - } - } finally { - consumeLock.unlock(); - } - long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor - <= cursorBound; nextCursor++) { - if (!waitingConsumePayloads.isPublished(nextCursor)) { - break; - } - RingBufferTruck truck = waitingConsumePayloads.get(nextCursor); - switch (truck.type()) { - case APPEND: - toWriteAppends.addLast(truck.unloadAppend()); - break; - case SYNC: - syncFutures.add(truck.unloadSync()); - break; - default: - LOG.warn("RingBufferTruck with unexpected type: " + truck.type()); - break; - } - waitingConsumePayloadsGatingSequence.set(nextCursor); - } + @Override + protected void preAppendAndSync() { if (markerEditOnly()) { drainNonMarkerEditsAndFailSyncs(); } - appendAndSync(); - if (hasConsumerTask.get()) { - return; - } - if (toWriteAppends.isEmpty()) { - if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { - consumerScheduled.set(false); - // recheck here since in append and sync we do not hold the consumeLock. Thing may - // happen like - // 1. we check cursor, no new entry - // 2. someone publishes a new entry to ringbuffer and the consumerScheduled is true and - // give up scheduling the consumer task. - // 3. we set consumerScheduled to false and also give up scheduling consumer task. - if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { - // we will give up consuming so if there are some unsynced data we need to issue a sync. - if ( - writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() - && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync - ) { - // no new data in the ringbuffer and we have at least one sync request - sync(writer); - } - return; - } else { - // maybe someone has grabbed this before us - if (!consumerScheduled.compareAndSet(false, true)) { - return; - } - } - } - } - // reschedule if we still have something to write. 
- consumeExecutor.execute(consumer); - } - - private boolean shouldScheduleConsumer() { - int currentEpochAndState = epochAndState; - if (writerBroken(currentEpochAndState) || waitingRoll(currentEpochAndState)) { - return false; - } - return consumerScheduled.compareAndSet(false, true); } // This is used by sync replication, where we are going to close the wal soon after we reopen all @@ -694,55 +230,11 @@ protected boolean markerEditOnly() { } @Override - protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException { + protected void precheckBeforeAppendWALEdit(RegionInfo hri, WALKeyImpl key, WALEdit edits, + boolean inMemstore) throws IOException { if (markerEditOnly() && !edits.isMetaEdit()) { throw new IOException("WAL is closing, only marker edit is allowed"); } - long txid = - stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); - if (shouldScheduleConsumer()) { - consumeExecutor.execute(consumer); - } - return txid; - } - - @Override - protected void doSync(boolean forceSync) throws IOException { - long txid = waitingConsumePayloads.next(); - SyncFuture future; - try { - future = getSyncFuture(txid, forceSync); - RingBufferTruck truck = waitingConsumePayloads.get(txid); - truck.load(future); - } finally { - waitingConsumePayloads.publish(txid); - } - if (shouldScheduleConsumer()) { - consumeExecutor.execute(consumer); - } - blockOnSync(future); - } - - @Override - protected void doSync(long txid, boolean forceSync) throws IOException { - if (highestSyncedTxid.get() >= txid) { - return; - } - // here we do not use ring buffer sequence as txid - long sequence = waitingConsumePayloads.next(); - SyncFuture future; - try { - future = getSyncFuture(txid, forceSync); - RingBufferTruck truck = waitingConsumePayloads.get(sequence); - truck.load(future); - } finally { - waitingConsumePayloads.publish(sequence); - } - if (shouldScheduleConsumer()) { - consumeExecutor.execute(consumer); - } - blockOnSync(future); } protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException { @@ -755,120 +247,11 @@ protected AsyncWriter createWriterInstance(Path path) throws IOException { return createAsyncWriter(fs, path); } - private void waitForSafePoint() { - consumeLock.lock(); - try { - int currentEpochAndState = epochAndState; - if (writerBroken(currentEpochAndState) || this.writer == null) { - return; - } - consumerScheduled.set(true); - epochAndState = currentEpochAndState | 1; - readyForRolling = false; - consumeExecutor.execute(consumer); - while (!readyForRolling) { - readyForRollingCond.awaitUninterruptibly(); - } - } finally { - consumeLock.unlock(); - } - } - - private void closeWriter(AsyncWriter writer, Path path) { - inflightWALClosures.put(path.getName(), writer); - closeExecutor.execute(() -> { - try { - writer.close(); - } catch (IOException e) { - LOG.warn("close old writer failed", e); - } finally { - // call this even if the above close fails, as there is no other chance we can set closed to - // true, it will not cause big problems. - markClosedAndClean(path); - inflightWALClosures.remove(path.getName()); - } - }); - } - @Override - protected void doReplaceWriter(Path oldPath, Path newPath, AsyncWriter nextWriter) - throws IOException { - Preconditions.checkNotNull(nextWriter); - waitForSafePoint(); - // we will call rollWriter in init method, where we want to create the first writer and - // obviously the previous writer is null, so here we need this null check. 
And why we must call - // logRollAndSetupWalProps before closeWriter is that, we will call markClosedAndClean after - // closing the writer asynchronously, we need to make sure the WALProps is put into - // walFile2Props before we call markClosedAndClean - if (writer != null) { - long oldFileLen = writer.getLength(); - logRollAndSetupWalProps(oldPath, newPath, oldFileLen); - closeWriter(writer, oldPath); - } else { - logRollAndSetupWalProps(oldPath, newPath, 0); - } - - this.writer = nextWriter; + protected void onWriterReplaced(AsyncWriter nextWriter) { if (nextWriter instanceof AsyncProtobufLogWriter) { this.fsOut = ((AsyncProtobufLogWriter) nextWriter).getOutput(); } - this.fileLengthAtLastSync = nextWriter.getLength(); - this.highestProcessedAppendTxidAtLastSync = 0L; - consumeLock.lock(); - try { - consumerScheduled.set(true); - int currentEpoch = epochAndState >>> 2; - int nextEpoch = currentEpoch == MAX_EPOCH ? 0 : currentEpoch + 1; - // set a new epoch and also clear waitingRoll and writerBroken - this.epochAndState = nextEpoch << 2; - // Reset rollRequested status - rollRequested.set(false); - consumeExecutor.execute(consumer); - } finally { - consumeLock.unlock(); - } - } - - @Override - protected void doShutdown() throws IOException { - waitForSafePoint(); - closeWriter(this.writer, getOldPath()); - this.writer = null; - closeExecutor.shutdown(); - try { - if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" - + " the close of async writer doesn't complete." - + "Please check the status of underlying filesystem" - + " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS - + "\""); - } - } catch (InterruptedException e) { - LOG.error("The wait for close of async writer is interrupted"); - Thread.currentThread().interrupt(); - } - IOException error = new IOException("WAL has been closed"); - long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; - // drain all the pending sync requests - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor - <= cursorBound; nextCursor++) { - if (!waitingConsumePayloads.isPublished(nextCursor)) { - break; - } - RingBufferTruck truck = waitingConsumePayloads.get(nextCursor); - switch (truck.type()) { - case SYNC: - syncFutures.add(truck.unloadSync()); - break; - default: - break; - } - } - // and fail them - syncFutures.forEach(f -> markFutureDoneAndOffer(f, f.getTxid(), error)); - if (!(consumeExecutor instanceof EventLoop)) { - consumeExecutor.shutdown(); - } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index a1b07baadf67..28e6a460316a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -17,27 +17,15 @@ */ package org.apache.hadoop.hbase.regionserver.wal; -import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.ERROR; -import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.LOW_REPLICATION; -import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SIZE; import static org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.RollRequestReason.SLOW_SYNC; -import com.lmax.disruptor.BlockingWaitStrategy; -import 
com.lmax.disruptor.EventHandler; -import com.lmax.disruptor.ExceptionHandler; -import com.lmax.disruptor.LifecycleAware; -import com.lmax.disruptor.TimeoutException; -import com.lmax.disruptor.dsl.Disruptor; -import com.lmax.disruptor.dsl.ProducerType; -import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; import java.util.List; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -45,14 +33,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.FSHLogProvider; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -61,8 +45,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** * The original implementation of FSWAL. */ @@ -110,8 +92,6 @@ public class FSHLog extends AbstractFSWAL { private static final String LOW_REPLICATION_ROLL_LIMIT = "hbase.regionserver.hlog.lowreplication.rolllimit"; private static final int DEFAULT_LOW_REPLICATION_ROLL_LIMIT = 5; - private static final String ROLL_ERRORS_TOLERATED = "hbase.regionserver.logroll.errors.tolerated"; - private static final int DEFAULT_ROLL_ERRORS_TOLERATED = 2; private static final String SYNCER_COUNT = "hbase.regionserver.hlog.syncer.count"; private static final int DEFAULT_SYNCER_COUNT = 5; private static final String MAX_BATCH_COUNT = "hbase.regionserver.wal.sync.batch.count"; @@ -121,22 +101,10 @@ public class FSHLog extends AbstractFSWAL { "hbase.wal.fshlog.wait.on.shutdown.seconds"; private static final int DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = 5; - /** - * The nexus at which all incoming handlers meet. Does appends and sync with an ordering. Appends - * and syncs are each put on the ring which means handlers need to smash up against the ring twice - * (can we make it once only? ... maybe not since time to append is so different from time to sync - * and sometimes we don't want to sync or we want to async the sync). The ring is where we make - * sure of our ordering and it is also where we do batching up of handler sync calls. - */ - private final Disruptor disruptor; - - /** - * This fellow is run by the above appendExecutor service but it is all about batching up appends - * and syncs; it may shutdown without cleaning out the last few appends or syncs. To guard against - * this, keep a reference to this handler and do explicit close on way out to make sure all - * flushed out before we exit. 
- */ - private final RingBufferEventHandler ringBufferEventHandler; + private static final IOException WITER_REPLACED_EXCEPTION = + new IOException("Writer was replaced!"); + private static final IOException WITER_BROKEN_EXCEPTION = new IOException("Wirter was broken!"); + private static final IOException WAL_CLOSE_EXCEPTION = new IOException("WAL was closed!"); /** * FSDataOutputStream associated with the current SequenceFile.writer @@ -161,37 +129,15 @@ public class FSHLog extends AbstractFSWAL { // Enable it if the replications recover. private volatile boolean lowReplicationRollEnabled = true; - /** Number of log close errors tolerated before we abort */ - private final int closeErrorsTolerated; - - private final AtomicInteger closeErrorCount = new AtomicInteger(); - - private final int waitOnShutdownInSeconds; + private final int syncerCount; + private final int maxSyncRequestCount; /** - * Exception handler to pass the disruptor ringbuffer. Same as native implementation only it logs - * using our logger instead of java native logger. + * Which syncrunner to use next. */ - static class RingBufferExceptionHandler implements ExceptionHandler { + private int syncRunnerIndex = 0; - @Override - public void handleEventException(Throwable ex, long sequence, RingBufferTruck event) { - LOG.error("Sequence=" + sequence + ", event=" + event, ex); - throw new RuntimeException(ex); - } - - @Override - public void handleOnStartException(Throwable ex) { - LOG.error(ex.toString(), ex); - throw new RuntimeException(ex); - } - - @Override - public void handleOnShutdownException(Throwable ex) { - LOG.error(ex.toString(), ex); - throw new RuntimeException(ex); - } - } + private SyncRunner[] syncRunners = null; /** * Constructor. @@ -246,29 +192,34 @@ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); - this.closeErrorsTolerated = conf.getInt(ROLL_ERRORS_TOLERATED, DEFAULT_ROLL_ERRORS_TOLERATED); - this.waitOnShutdownInSeconds = - conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); - // This is the 'writer' -- a single threaded executor. This single thread 'consumes' what is - // put on the ring buffer. - String hostingThreadName = Thread.currentThread().getName(); - // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense - // spinning as other strategies do. - this.disruptor = new Disruptor<>(RingBufferTruck::new, getPreallocatedEventCount(), - new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".append-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - ProducerType.MULTI, new BlockingWaitStrategy()); + // Advance the ring buffer sequence so that it starts from 1 instead of 0, // because SyncFuture.NOT_DONE = 0. 
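
With the change above, each FSHLog sync is handed off as a CompletableFuture that a SyncRunner later completes with the highest synced txid, or completes exceptionally with one of the static IOExceptions introduced here (writer replaced, writer broken, WAL closed). The following sketch shows how such a future might be awaited and unwrapped back into an IOException on a blocking sync path; it is illustrative only and not part of this patch, it assumes the future carries a Long, and waitOnSyncResult is a hypothetical helper name.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class SyncFutureSketch {

  // Block until the sync future completes and translate failures back into IOException,
  // which is what a blocking sync()/hsync() caller ultimately expects.
  static long waitOnSyncResult(CompletableFuture<Long> syncDone) throws IOException {
    try {
      // The runner completes the future with the highest txid known to be durable.
      return syncDone.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      InterruptedIOException iioe =
        new InterruptedIOException("Interrupted while waiting for WAL sync");
      iioe.initCause(e);
      throw iioe;
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        // For example: the writer was replaced or broken, or the WAL was closed.
        throw (IOException) cause;
      }
      throw new IOException("WAL sync failed", cause);
    }
  }
}
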
- this.disruptor.getRingBuffer().next(); - int syncerCount = conf.getInt(SYNCER_COUNT, DEFAULT_SYNCER_COUNT); - int maxBatchCount = conf.getInt(MAX_BATCH_COUNT, + + this.syncerCount = conf.getInt(SYNCER_COUNT, DEFAULT_SYNCER_COUNT); + this.maxSyncRequestCount = conf.getInt(MAX_BATCH_COUNT, conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, DEFAULT_MAX_BATCH_COUNT)); - this.ringBufferEventHandler = new RingBufferEventHandler(syncerCount, maxBatchCount); - this.disruptor.setDefaultExceptionHandler(new RingBufferExceptionHandler()); - this.disruptor.handleEventsWith(new RingBufferEventHandler[] { this.ringBufferEventHandler }); - // Starting up threads in constructor is a no no; Interface should have an init call. - this.disruptor.start(); + + this.createSingleThreadPoolConsumeExecutor("FSHLog", rootDir, prefix); + + this.setWaitOnShutdownInSeconds( + conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS), + FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + } + + @Override + public void init() throws IOException { + super.init(); + this.createSyncRunnersAndStart(); + } + + private void createSyncRunnersAndStart() { + this.syncRunnerIndex = 0; + this.syncRunners = new SyncRunner[syncerCount]; + for (int i = 0; i < syncerCount; i++) { + this.syncRunners[i] = new SyncRunner("sync." + i, maxSyncRequestCount); + this.syncRunners[i].start(); + } } /** @@ -311,207 +262,70 @@ protected Writer createWriterInstance(final Path path) throws IOException { return writer; } - /** - * Used to manufacture race condition reliably. For testing only. - * @see #beforeWaitOnSafePoint() - */ - protected void afterCreatingZigZagLatch() { - } - - /** - * @see #afterCreatingZigZagLatch() - */ - protected void beforeWaitOnSafePoint() { - } - @Override protected void doAppend(Writer writer, FSWALEntry entry) throws IOException { writer.append(entry); } @Override - protected void doReplaceWriter(Path oldPath, Path newPath, Writer nextWriter) throws IOException { - // Ask the ring buffer writer to pause at a safe point. Once we do this, the writer - // thread will eventually pause. An error hereafter needs to release the writer thread - // regardless -- hence the finally block below. Note, this method is called from the FSHLog - // constructor BEFORE the ring buffer is set running so it is null on first time through - // here; allow for that. - SyncFuture syncFuture = null; - SafePointZigZagLatch zigzagLatch = null; - long sequence = -1L; - if (this.writer != null && this.ringBufferEventHandler != null) { - // Get sequence first to avoid dead lock when ring buffer is full - // Considering below sequence - // 1. replaceWriter is called and zigzagLatch is initialized - // 2. ringBufferEventHandler#onEvent is called and arrives at #attainSafePoint(long) then wait - // on safePointReleasedLatch - // 3. Since ring buffer is full, if we get sequence when publish sync, the replaceWriter - // thread will wait for the ring buffer to be consumed, but the only consumer is waiting - // replaceWriter thread to release safePointReleasedLatch, which causes a deadlock - sequence = getSequenceOnRingBuffer(); - zigzagLatch = this.ringBufferEventHandler.attainSafePoint(); - } - afterCreatingZigZagLatch(); - try { - // Wait on the safe point to be achieved. Send in a sync in case nothing has hit the - // ring buffer between the above notification of writer that we want it to go to - // 'safe point' and then here where we are waiting on it to attain safe point. 
Use - // 'sendSync' instead of 'sync' because we do not want this thread to block waiting on it - // to come back. Cleanup this syncFuture down below after we are ready to run again. - try { - if (zigzagLatch != null) { - // use assert to make sure no change breaks the logic that - // sequence and zigzagLatch will be set together - assert sequence > 0L : "Failed to get sequence from ring buffer"; - syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence, false)); - } - } catch (FailedSyncBeforeLogCloseException e) { - // If unflushed/unsynced entries on close, it is reason to abort. - if (isUnflushedEntries()) { - throw e; - } - LOG.warn( - "Failed sync-before-close but no outstanding appends; closing WAL" + e.getMessage()); - } - // It is at the safe point. Swap out writer from under the blocked writer thread. - // we will call rollWriter in init method, where we want to create the first writer and - // obviously the previous writer is null, so here we need this null check. And why we must - // call logRollAndSetupWalProps before closeWriter is that, we will call markClosedAndClean - // after closing the writer asynchronously, we need to make sure the WALProps is put into - // walFile2Props before we call markClosedAndClean - if (this.writer != null) { - long oldFileLen = this.writer.getLength(); - logRollAndSetupWalProps(oldPath, newPath, oldFileLen); - // In case of having unflushed entries or we already reached the - // closeErrorsTolerated count, call the closeWriter inline rather than in async - // way so that in case of an IOE we will throw it back and abort RS. - inflightWALClosures.put(oldPath.getName(), writer); - if (isUnflushedEntries() || closeErrorCount.get() >= this.closeErrorsTolerated) { - try { - closeWriter(this.writer, oldPath, true); - } finally { - inflightWALClosures.remove(oldPath.getName()); - } - } else { - Writer localWriter = this.writer; - closeExecutor.execute(() -> { - try { - closeWriter(localWriter, oldPath, false); - } catch (IOException e) { - LOG.warn("close old writer failed", e); - } finally { - // call this even if the above close fails, as there is no other chance we can set - // closed to true, it will not cause big problems. - markClosedAndClean(oldPath); - inflightWALClosures.remove(oldPath.getName()); - } - }); - } - } else { - logRollAndSetupWalProps(oldPath, newPath, 0); - } - - this.writer = nextWriter; - if (nextWriter != null && nextWriter instanceof ProtobufLogWriter) { - this.hdfs_out = ((ProtobufLogWriter) nextWriter).getStream(); - } else { - this.hdfs_out = null; - } - } catch (InterruptedException ie) { - // Perpetuate the interrupt - Thread.currentThread().interrupt(); - } catch (IOException e) { - long count = getUnflushedEntriesCount(); - LOG.error("Failed close of WAL writer " + oldPath + ", unflushedEntries=" + count, e); - throw new FailedLogCloseException(oldPath + ", unflushedEntries=" + count, e); - } finally { - // Let the writer thread go regardless, whether error or not. - if (zigzagLatch != null) { - // Reset rollRequested status - rollRequested.set(false); - zigzagLatch.releaseSafePoint(); - // syncFuture will be null if we failed our wait on safe point above. Otherwise, if - // latch was obtained successfully, the sync we threw in either trigger the latch or it - // got stamped with an exception because the WAL was damaged and we could not sync. Now - // the write pipeline has been opened up again by releasing the safe point, process the - // syncFuture we got above. 
This is probably a noop but it may be stale exception from - // when old WAL was in place. Catch it if so. - if (syncFuture != null) { - try { - blockOnSync(syncFuture); - } catch (IOException ioe) { - if (LOG.isTraceEnabled()) { - LOG.trace("Stale sync exception", ioe); - } - } - } - } + protected void onWriterReplaced(Writer nextWriter) { + if (nextWriter != null && nextWriter instanceof ProtobufLogWriter) { + this.hdfs_out = ((ProtobufLogWriter) nextWriter).getStream(); + } else { + this.hdfs_out = null; } + this.createSyncRunnersAndStart(); } - private void closeWriter(Writer writer, Path path, boolean syncCloseCall) throws IOException { - Span span = Span.current(); - try { - span.addEvent("closing writer"); - writer.close(); - span.addEvent("writer closed"); - } catch (IOException ioe) { - int errors = closeErrorCount.incrementAndGet(); - boolean hasUnflushedEntries = isUnflushedEntries(); - if (syncCloseCall && (hasUnflushedEntries || (errors > this.closeErrorsTolerated))) { - LOG.error("Close of WAL " + path + " failed. Cause=\"" + ioe.getMessage() + "\", errors=" - + errors + ", hasUnflushedEntries=" + hasUnflushedEntries); - throw ioe; + @Override + protected void doCleanUpResources() { + this.shutDownSyncRunners(); + }; + + private void shutDownSyncRunners() { + SyncRunner[] syncRunnersToUse = this.syncRunners; + if (syncRunnersToUse != null) { + for (SyncRunner syncRunner : syncRunnersToUse) { + syncRunner.shutDown(); } - LOG.warn("Riding over failed WAL close of " + path - + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); } + this.syncRunners = null; } @Override - protected void doShutdown() throws IOException { - // Shutdown the disruptor. Will stop after all entries have been processed. Make sure we - // have stopped incoming appends before calling this else it will not shutdown. We are - // conservative below waiting a long time and if not elapsed, then halting. - if (this.disruptor != null) { - long timeoutms = conf.getLong("hbase.wal.disruptor.shutdown.timeout.ms", 60000); - try { - this.disruptor.shutdown(timeoutms, TimeUnit.MILLISECONDS); - } catch (TimeoutException e) { - LOG.warn("Timed out bringing down disruptor after " + timeoutms + "ms; forcing halt " - + "(It is a problem if this is NOT an ABORT! -- DATALOSS!!!!)"); - this.disruptor.halt(); - this.disruptor.shutdown(); - } - } + protected CompletableFuture doWriterSync(Writer writer, boolean shouldUseHSync, + long txidWhenSync) { + CompletableFuture future = new CompletableFuture<>(); + SyncRequest syncRequest = new SyncRequest(writer, shouldUseHSync, txidWhenSync, future); + this.offerSyncRequest(syncRequest); + return future; + } - if (LOG.isDebugEnabled()) { - LOG.debug("Closing WAL writer in " + CommonFSUtils.getPath(walDir)); - } - if (this.writer != null) { - this.writer.close(); - this.writer = null; - } - closeExecutor.shutdown(); - try { - if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error( - "We have waited {} seconds but the close of writer(s) doesn't complete." 
- + "Please check the status of underlying filesystem" - + " or increase the wait time by the config \"{}\"", - this.waitOnShutdownInSeconds, FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + private void offerSyncRequest(SyncRequest syncRequest) { + for (int i = 0; i < this.syncRunners.length; i++) { + this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length; + if (this.syncRunners[this.syncRunnerIndex].offer(syncRequest)) { + return; } - } catch (InterruptedException e) { - LOG.error("The wait for termination of FSHLog writer(s) is interrupted"); - Thread.currentThread().interrupt(); } + syncRequest.completableFuture + .completeExceptionally(new IOException("There is no available syncRunner.")); } - @Override - protected long append(final RegionInfo hri, final WALKeyImpl key, final WALEdit edits, - final boolean inMemstore) throws IOException { - return stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, - disruptor.getRingBuffer()); + static class SyncRequest { + private final Writer writer; + private final boolean shouldUseHSync; + private final long sequenceWhenSync; + private final CompletableFuture completableFuture; + + public SyncRequest(Writer writer, boolean shouldUseHSync, long txidWhenSync, + CompletableFuture completableFuture) { + this.writer = writer; + this.shouldUseHSync = shouldUseHSync; + this.sequenceWhenSync = txidWhenSync; + this.completableFuture = completableFuture; + } + } /** @@ -531,10 +345,9 @@ protected long append(final RegionInfo hri, final WALKeyImpl key, final WALEdit * completes. */ private class SyncRunner extends Thread { - private volatile long sequence; // Keep around last exception thrown. Clear on successful sync. - private final BlockingQueue syncFutures; - private volatile SyncFuture takeSyncFuture = null; + private final BlockingQueue syncRequests; + private volatile boolean shutDown = false; SyncRunner(final String name, final int maxHandlersCount) { super(name); @@ -551,183 +364,154 @@ private class SyncRunner extends Thread { // the meta table when succesful (i.e. sync), closing handlers -- etc. These are usually // much fewer in number than the user-space handlers so Q-size should be user handlers plus // some space for these other handlers. Lets multiply by 3 for good-measure. - this.syncFutures = new LinkedBlockingQueue<>(maxHandlersCount * 3); + this.syncRequests = new LinkedBlockingQueue<>(maxHandlersCount * 3); } - void offer(final long sequence, final SyncFuture[] syncFutures, final int syncFutureCount) { - // Set sequence first because the add to the queue will wake the thread if sleeping. - this.sequence = sequence; - for (int i = 0; i < syncFutureCount; ++i) { - this.syncFutures.add(syncFutures[i]); + boolean offer(SyncRequest syncRequest) { + if (this.shutDown) { + return false; } - } - /** - * Release the passed syncFuture - * @return Returns 1. - */ - private int releaseSyncFuture(final SyncFuture syncFuture, final long currentSequence, - final Throwable t) { - if (!syncFuture.done(currentSequence, t)) { - throw new IllegalStateException(); + if (!this.syncRequests.offer(syncRequest)) { + return false; } - // This function releases one sync future only. - return 1; + // recheck + if (this.shutDown) { + if (this.syncRequests.remove(syncRequest)) { + return false; + } + } + return true; } - /** - * Release all SyncFutures whose sequence is <= currentSequence. - * @param t May be non-null if we are processing SyncFutures because an exception was thrown. - * @return Count of SyncFutures we let go. 
- */ - private int releaseSyncFutures(final long currentSequence, final Throwable t) { - int syncCount = 0; - for (SyncFuture syncFuture; (syncFuture = this.syncFutures.peek()) != null;) { - if (syncFuture.getTxid() > currentSequence) { + private void completeSyncRequests(SyncRequest syncRequest, long syncedSequenceId) { + if (syncRequest != null) { + syncRequest.completableFuture.complete(syncedSequenceId); + } + while (true) { + SyncRequest head = this.syncRequests.peek(); + if (head == null) { break; } - releaseSyncFuture(syncFuture, currentSequence, t); - if (!this.syncFutures.remove(syncFuture)) { - throw new IllegalStateException(syncFuture.toString()); + if (head.sequenceWhenSync > syncedSequenceId) { + break; } - syncCount++; + head.completableFuture.complete(syncedSequenceId); + this.syncRequests.poll(); } - return syncCount; } - /** - * @param sequence The sequence we ran the filesystem sync against. - * @return Current highest synced sequence. - */ - private long updateHighestSyncedSequence(long sequence) { - long currentHighestSyncedSequence; - // Set the highestSyncedSequence IFF our current sequence id is the 'highest'. - do { - currentHighestSyncedSequence = highestSyncedTxid.get(); - if (currentHighestSyncedSequence >= sequence) { - // Set the sync number to current highwater mark; might be able to let go more - // queued sync futures - sequence = currentHighestSyncedSequence; + private void completeExceptionallySyncRequests(SyncRequest syncRequest, Exception exception) { + if (syncRequest != null) { + syncRequest.completableFuture.completeExceptionally(exception); + } + while (true) { + SyncRequest head = this.syncRequests.peek(); + if (head == null) { break; } - } while (!highestSyncedTxid.compareAndSet(currentHighestSyncedSequence, sequence)); - return sequence; + if (head.writer != syncRequest.writer) { + break; + } + head.completableFuture.completeExceptionally(exception); + this.syncRequests.poll(); + } } - boolean areSyncFuturesReleased() { - // check whether there is no sync futures offered, and no in-flight sync futures that is being - // processed. - return syncFutures.size() <= 0 && takeSyncFuture == null; + private SyncRequest takeSyncRequest() throws InterruptedException { + while (true) { + // We have to process what we 'take' from the queue + SyncRequest syncRequest = this.syncRequests.take(); + // See if we can process any syncfutures BEFORE we go sync. + long currentHighestSyncedSequence = highestSyncedTxid.get(); + if (syncRequest.sequenceWhenSync < currentHighestSyncedSequence) { + syncRequest.completableFuture.complete(currentHighestSyncedSequence); + continue; + } + return syncRequest; + } } @Override public void run() { - long currentSequence; - while (!isInterrupted()) { - int syncCount = 0; - + while (!this.shutDown) { try { - // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs - // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED - // java.lang.NullPointerException - // at org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:582) - // at java.lang.Thread.run(Thread.java:748) - SyncFuture sf; - while (true) { - takeSyncFuture = null; - // We have to process what we 'take' from the queue - takeSyncFuture = this.syncFutures.take(); - // Make local copy. 
- sf = takeSyncFuture; - currentSequence = this.sequence; - long syncFutureSequence = sf.getTxid(); - if (syncFutureSequence > currentSequence) { - throw new IllegalStateException("currentSequence=" + currentSequence - + ", syncFutureSequence=" + syncFutureSequence); - } - // See if we can process any syncfutures BEFORE we go sync. - long currentHighestSyncedSequence = highestSyncedTxid.get(); - if (currentSequence < currentHighestSyncedSequence) { - syncCount += releaseSyncFuture(sf, currentHighestSyncedSequence, null); - // Done with the 'take'. Go around again and do a new 'take'. - continue; - } - break; - } + SyncRequest syncRequest = this.takeSyncRequest(); // I got something. Lets run. Save off current sequence number in case it changes // while we run. - long start = System.nanoTime(); - Throwable lastException = null; + long currentSequenceToUse = syncRequest.sequenceWhenSync; + boolean writerBroken = isWriterBroken(); + long currentHighestProcessedAppendTxid = highestProcessedAppendTxid; + Writer currentWriter = writer; + if (currentWriter != syncRequest.writer) { + syncRequest.completableFuture.completeExceptionally(WITER_REPLACED_EXCEPTION); + continue; + } + if (writerBroken) { + syncRequest.completableFuture.completeExceptionally(WITER_BROKEN_EXCEPTION); + continue; + } + if (currentHighestProcessedAppendTxid > currentSequenceToUse) { + currentSequenceToUse = currentHighestProcessedAppendTxid; + } + Exception lastException = null; try { - long unSyncedFlushSeq = highestUnsyncedTxid; - writer.sync(sf.isForceSync()); - if (unSyncedFlushSeq > currentSequence) { - currentSequence = unSyncedFlushSeq; - } - currentSequence = updateHighestSyncedSequence(currentSequence); + writer.sync(syncRequest.shouldUseHSync); } catch (IOException e) { - LOG.error("Error syncing, request close of WAL", e); + LOG.error("Error syncing", e); lastException = e; } catch (Exception e) { LOG.warn("UNEXPECTED", e); lastException = e; } finally { - // First release what we 'took' from the queue. - syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException); - // Can we release other syncs? - syncCount += releaseSyncFutures(currentSequence, lastException); if (lastException != null) { - requestLogRoll(ERROR); + this.completeExceptionallySyncRequests(syncRequest, lastException); } else { - checkLogRoll(); + this.completeSyncRequests(syncRequest, currentSequenceToUse); } } - postSync(System.nanoTime() - start, syncCount); } catch (InterruptedException e) { // Presume legit interrupt. - Thread.currentThread().interrupt(); + LOG.info("interrupted"); } catch (Throwable t) { LOG.warn("UNEXPECTED, continuing", t); } } + this.clearSyncRequestsWhenShutDown(); + } + + private void clearSyncRequestsWhenShutDown() { + while (true) { + SyncRequest syncRequest = this.syncRequests.poll(); + if (syncRequest == null) { + break; + } + syncRequest.completableFuture.completeExceptionally(WAL_CLOSE_EXCEPTION); + } + } + + void shutDown() { + try { + this.shutDown = true; + this.interrupt(); + this.join(); + } catch (InterruptedException e) { + LOG.warn("interrupted", e); + Thread.currentThread().interrupt(); + } } } - /** - * Schedule a log roll if needed. - */ - private boolean checkLogRoll() { - // If we have already requested a roll, do nothing + @Override + protected void checkSlowSyncCount() { if (isLogRollRequested()) { - return false; - } - // Will return immediately if we are in the middle of a WAL log roll currently. 
- if (!rollWriterLock.tryLock()) { - return false; + return; } - try { - if (doCheckLogLowReplication()) { - LOG.warn("Requesting log roll because of low replication, current pipeline: " - + Arrays.toString(getPipeline())); - requestLogRoll(LOW_REPLICATION); - return true; - } else if (writer != null && writer.getLength() > logrollsize) { - if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" - + writer.getLength() + ", logrollsize=" + logrollsize); - } - requestLogRoll(SIZE); - return true; - } else if (doCheckSlowSync()) { - // We log this already in checkSlowSync - requestLogRoll(SLOW_SYNC); - return true; - } - } finally { - rollWriterLock.unlock(); + if (doCheckSlowSync()) { + // We log this already in checkSlowSync + requestLogRoll(SLOW_SYNC); } - return false; } /** Returns true if number of replicas for the WAL is lower than threshold */ @@ -777,33 +561,6 @@ protected boolean doCheckLogLowReplication() { return logRollNeeded; } - protected long getSequenceOnRingBuffer() { - return this.disruptor.getRingBuffer().next(); - } - - private SyncFuture publishSyncOnRingBuffer(boolean forceSync) { - long sequence = getSequenceOnRingBuffer(); - return publishSyncOnRingBuffer(sequence, forceSync); - } - - protected SyncFuture publishSyncOnRingBuffer(long sequence, boolean forceSync) { - // here we use ring buffer sequence as transaction id - SyncFuture syncFuture = getSyncFuture(sequence, forceSync); - try { - RingBufferTruck truck = this.disruptor.getRingBuffer().get(sequence); - truck.load(syncFuture); - } finally { - this.disruptor.getRingBuffer().publish(sequence); - } - return syncFuture; - } - - // Sync all known transactions - private void publishSyncThenBlockOnCompletion(boolean forceSync) throws IOException { - SyncFuture syncFuture = publishSyncOnRingBuffer(forceSync); - blockOnSync(syncFuture); - } - /** * {@inheritDoc} *
@@ -824,20 +581,6 @@ int getLogReplication() { return 0; } - @Override - protected void doSync(boolean forceSync) throws IOException { - publishSyncThenBlockOnCompletion(forceSync); - } - - @Override - protected void doSync(long txid, boolean forceSync) throws IOException { - if (this.highestSyncedTxid.get() >= txid) { - // Already sync'd. - return; - } - publishSyncThenBlockOnCompletion(forceSync); - } - boolean isLowReplicationRollEnabled() { return lowReplicationRollEnabled; } @@ -846,361 +589,6 @@ boolean isLowReplicationRollEnabled() { ClassSize.align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); - /** - * This class is used coordinating two threads holding one thread at a 'safe point' while the - * orchestrating thread does some work that requires the first thread paused: e.g. holding the WAL - * writer while its WAL is swapped out from under it by another thread. - *
- * Thread A signals Thread B to hold when it gets to a 'safe point'. Thread A wait until Thread B - * gets there. When the 'safe point' has been attained, Thread B signals Thread A. Thread B then - * holds at the 'safe point'. Thread A on notification that Thread B is paused, goes ahead and - * does the work it needs to do while Thread B is holding. When Thread A is done, it flags B and - * then Thread A and Thread B continue along on their merry way. Pause and signalling 'zigzags' - * between the two participating threads. We use two latches -- one the inverse of the other -- - * pausing and signaling when states are achieved. - *
- * To start up the drama, Thread A creates an instance of this class each time it would do this - * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only). - * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts - * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it - * cannot proceed until the Thread B 'safe point' is attained. Thread A will be held inside in - * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread - * B frees Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the - * 'safe point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it - * blocks here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it - * needs to do while Thread B is paused. When finished, it lets Thread B lose by calling - * {@link #releaseSafePoint()} and away go both Threads again. - */ - static class SafePointZigZagLatch { - /** - * Count down this latch when safe point attained. - */ - private volatile CountDownLatch safePointAttainedLatch = new CountDownLatch(1); - /** - * Latch to wait on. Will be released when we can proceed. - */ - private volatile CountDownLatch safePointReleasedLatch = new CountDownLatch(1); - - private void checkIfSyncFailed(SyncFuture syncFuture) throws FailedSyncBeforeLogCloseException { - Throwable t = syncFuture.getThrowable(); - if (t != null) { - throw new FailedSyncBeforeLogCloseException(t); - } - } - - /** - * For Thread A to call when it is ready to wait on the 'safe point' to be attained. Thread A - * will be held in here until Thread B calls {@link #safePointAttained()} - * @param syncFuture We need this as barometer on outstanding syncs. If it comes home with an - * exception, then something is up w/ our syncing. - * @return The passed syncFuture - */ - SyncFuture waitSafePoint(SyncFuture syncFuture) - throws InterruptedException, FailedSyncBeforeLogCloseException { - while (!this.safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) { - checkIfSyncFailed(syncFuture); - } - checkIfSyncFailed(syncFuture); - return syncFuture; - } - - /** Returns if the safepoint has been attained. */ - @InterfaceAudience.Private - boolean isSafePointAttained() { - return this.safePointAttainedLatch.getCount() == 0; - } - - /** - * Called by Thread B when it attains the 'safe point'. In this method, Thread B signals Thread - * A it can proceed. Thread B will be held in here until {@link #releaseSafePoint()} is called - * by Thread A. - */ - void safePointAttained() throws InterruptedException { - this.safePointAttainedLatch.countDown(); - this.safePointReleasedLatch.await(); - } - - /** - * Called by Thread A when it is done with the work it needs to do while Thread B is halted. - * This will release the Thread B held in a call to {@link #safePointAttained()} - */ - void releaseSafePoint() { - this.safePointReleasedLatch.countDown(); - } - - /** Returns True is this is a 'cocked', fresh instance, and not one that has already fired. */ - boolean isCocked() { - return this.safePointAttainedLatch.getCount() > 0 - && this.safePointReleasedLatch.getCount() > 0; - } - } - - /** - * Handler that is run by the disruptor ringbuffer consumer. Consumer is a SINGLE - * 'writer/appender' thread. Appends edits and starts up sync runs. Tries its best to batch up - * syncs. 
There is no discernible benefit batching appends so we just append as they come in - * because it simplifies the below implementation. See metrics for batching effectiveness (In - * measurement, at 100 concurrent handlers writing 1k, we are batching > 10 appends and 10 handler - * sync invocations for every actual dfsclient sync call; at 10 concurrent handlers, YMMV). - *
- * Herein, we have an array into which we store the sync futures as they come in. When we have a - * 'batch', we'll then pass what we have collected to a SyncRunner thread to do the filesystem - * sync. When it completes, it will then call {@link SyncFuture#done(long, Throwable)} on each of - * SyncFutures in the batch to release blocked Handler threads. - *
- * I've tried various effects to try and make latencies low while keeping throughput high. I've - * tried keeping a single Queue of SyncFutures in this class appending to its tail as the syncs - * coming and having sync runner threads poll off the head to 'finish' completed SyncFutures. I've - * tried linkedlist, and various from concurrent utils whether LinkedBlockingQueue or - * ArrayBlockingQueue, etc. The more points of synchronization, the more 'work' (according to - * 'perf stats') that has to be done; small increases in stall percentages seem to have a big - * impact on throughput/latencies. The below model where we have an array into which we stash the - * syncs and then hand them off to the sync thread seemed like a decent compromise. See HBASE-8755 - * for more detail. - */ - class RingBufferEventHandler implements EventHandler, LifecycleAware { - private final SyncRunner[] syncRunners; - private final SyncFuture[] syncFutures; - // Had 'interesting' issues when this was non-volatile. On occasion, we'd not pass all - // syncFutures to the next sync'ing thread. - private AtomicInteger syncFuturesCount = new AtomicInteger(); - private volatile SafePointZigZagLatch zigzagLatch; - /** - * Set if we get an exception appending or syncing so that all subsequence appends and syncs on - * this WAL fail until WAL is replaced. - */ - private Exception exception = null; - /** - * Object to block on while waiting on safe point. - */ - private final Object safePointWaiter = new Object(); - private volatile boolean shutdown = false; - - /** - * Which syncrunner to use next. - */ - private int syncRunnerIndex; - - RingBufferEventHandler(final int syncRunnerCount, final int maxBatchCount) { - this.syncFutures = new SyncFuture[maxBatchCount]; - this.syncRunners = new SyncRunner[syncRunnerCount]; - for (int i = 0; i < syncRunnerCount; i++) { - this.syncRunners[i] = new SyncRunner("sync." + i, maxBatchCount); - } - } - - private void cleanupOutstandingSyncsOnException(final long sequence, final Exception e) { - // There could be handler-count syncFutures outstanding. - for (int i = 0; i < this.syncFuturesCount.get(); i++) { - this.syncFutures[i].done(sequence, e); - } - offerDoneSyncsBackToCache(); - } - - /** - * Offers the finished syncs back to the cache for reuse. - */ - private void offerDoneSyncsBackToCache() { - for (int i = 0; i < this.syncFuturesCount.get(); i++) { - syncFutureCache.offer(syncFutures[i]); - } - this.syncFuturesCount.set(0); - } - - /** Returns True if outstanding sync futures still */ - private boolean isOutstandingSyncs() { - // Look at SyncFutures in the EventHandler - for (int i = 0; i < this.syncFuturesCount.get(); i++) { - if (!this.syncFutures[i].isDone()) { - return true; - } - } - - return false; - } - - private boolean isOutstandingSyncsFromRunners() { - // Look at SyncFutures in the SyncRunners - for (SyncRunner syncRunner : syncRunners) { - if (syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { - return true; - } - } - return false; - } - - @Override - // We can set endOfBatch in the below method if at end of our this.syncFutures array - public void onEvent(final RingBufferTruck truck, final long sequence, boolean endOfBatch) - throws Exception { - // Appends and syncs are coming in order off the ringbuffer. We depend on this fact. We'll - // add appends to dfsclient as they come in. Batching appends doesn't give any significant - // benefit on measurement. Handler sync calls we will batch up. 
If we get an exception - // appending an edit, we fail all subsequent appends and syncs with the same exception until - // the WAL is reset. It is important that we not short-circuit and exit early this method. - // It is important that we always go through the attainSafePoint on the end. Another thread, - // the log roller may be waiting on a signal from us here and will just hang without it. - - try { - if (truck.type() == RingBufferTruck.Type.SYNC) { - this.syncFutures[this.syncFuturesCount.getAndIncrement()] = truck.unloadSync(); - // Force flush of syncs if we are carrying a full complement of syncFutures. - if (this.syncFuturesCount.get() == this.syncFutures.length) { - endOfBatch = true; - } - } else if (truck.type() == RingBufferTruck.Type.APPEND) { - FSWALEntry entry = truck.unloadAppend(); - try { - if (this.exception != null) { - // Return to keep processing events coming off the ringbuffer - return; - } - append(entry); - } catch (Exception e) { - // Failed append. Record the exception. - this.exception = e; - // invoking cleanupOutstandingSyncsOnException when append failed with exception, - // it will cleanup existing sync requests recorded in syncFutures but not offered to - // SyncRunner yet, - // so there won't be any sync future left over if no further truck published to - // disruptor. - cleanupOutstandingSyncsOnException(sequence, - this.exception instanceof DamagedWALException - ? this.exception - : new DamagedWALException("On sync", this.exception)); - // Return to keep processing events coming off the ringbuffer - return; - } finally { - entry.release(); - } - } else { - // What is this if not an append or sync. Fail all up to this!!! - cleanupOutstandingSyncsOnException(sequence, - new IllegalStateException("Neither append nor sync")); - // Return to keep processing. - return; - } - - // TODO: Check size and if big go ahead and call a sync if we have enough data. - // This is a sync. If existing exception, fall through. Else look to see if batch. - if (this.exception == null) { - // If not a batch, return to consume more events from the ring buffer before proceeding; - // we want to get up a batch of syncs and appends before we go do a filesystem sync. - if (!endOfBatch || this.syncFuturesCount.get() <= 0) { - return; - } - // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows: - // * The maximum value possible for syncRunners.length is Integer.MAX_INT - // * syncRunnerIndex starts at 0 and is incremented only here - // * after the increment, the value is bounded by the '%' operator to - // [0, syncRunners.length), presuming the value was positive prior to - // the '%' operator. - // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in - // syncRunnerIndex ensuring that it can't grow without bound and overflow. - // * note that the value after the increment must be positive, because the most it - // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. - this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length; - try { - // Below expects that the offer 'transfers' responsibility for the outstanding syncs to - // the syncRunner. We should never get an exception in here. - this.syncRunners[this.syncRunnerIndex].offer(sequence, this.syncFutures, - this.syncFuturesCount.get()); - } catch (Exception e) { - // Should NEVER get here. 
- requestLogRoll(ERROR); - this.exception = new DamagedWALException("Failed offering sync", e); - } - } - // We may have picked up an exception above trying to offer sync - if (this.exception != null) { - cleanupOutstandingSyncsOnException(sequence, - this.exception instanceof DamagedWALException - ? this.exception - : new DamagedWALException("On sync", this.exception)); - } - attainSafePoint(sequence); - // It is critical that we offer the futures back to the cache for reuse here after the - // safe point is attained and all the clean up has been done. There have been - // issues with reusing sync futures early causing WAL lockups, see HBASE-25984. - offerDoneSyncsBackToCache(); - } catch (Throwable t) { - LOG.error("UNEXPECTED!!! syncFutures.length=" + this.syncFutures.length, t); - } - } - - SafePointZigZagLatch attainSafePoint() { - this.zigzagLatch = new SafePointZigZagLatch(); - return this.zigzagLatch; - } - - /** - * Check if we should attain safe point. If so, go there and then wait till signalled before we - * proceeding. - */ - private void attainSafePoint(final long currentSequence) { - if (this.zigzagLatch == null || !this.zigzagLatch.isCocked()) { - return; - } - // If here, another thread is waiting on us to get to safe point. Don't leave it hanging. - beforeWaitOnSafePoint(); - try { - // Wait on outstanding syncers; wait for them to finish syncing (unless we've been - // shutdown or unless our latch has been thrown because we have been aborted or unless - // this WAL is broken and we can't get a sync/append to complete). - while ( - (!this.shutdown && this.zigzagLatch.isCocked() - && highestSyncedTxid.get() < currentSequence && - // We could be in here and all syncs are failing or failed. Check for this. Otherwise - // we'll just be stuck here for ever. In other words, ensure there syncs running. - isOutstandingSyncs()) - // Wait for all SyncRunners to finish their work so that we can replace the writer - || isOutstandingSyncsFromRunners() - ) { - synchronized (this.safePointWaiter) { - this.safePointWaiter.wait(0, 1); - } - } - // Tell waiting thread we've attained safe point. Can clear this.throwable if set here - // because we know that next event through the ringbuffer will be going to a new WAL - // after we do the zigzaglatch dance. - this.exception = null; - this.zigzagLatch.safePointAttained(); - } catch (InterruptedException e) { - LOG.warn("Interrupted ", e); - Thread.currentThread().interrupt(); - } - } - - /** - * Append to the WAL. Does all CP and WAL listener calls. - */ - void append(final FSWALEntry entry) throws Exception { - try { - FSHLog.this.appendEntry(writer, entry); - } catch (Exception e) { - String msg = - "Append sequenceId=" + entry.getKey().getSequenceId() + ", requesting roll of WAL"; - LOG.warn(msg, e); - requestLogRoll(ERROR); - throw new DamagedWALException(msg, e); - } - } - - @Override - public void onStart() { - for (SyncRunner syncRunner : this.syncRunners) { - syncRunner.start(); - } - } - - @Override - public void onShutdown() { - for (SyncRunner syncRunner : this.syncRunners) { - syncRunner.interrupt(); - } - } - } - /** * This method gets the pipeline for the current WAL. 
*/ @@ -1221,4 +609,5 @@ Writer getWriter() { void setWriter(Writer writer) { this.writer = writer; } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index 476e3bd330bc..333ec4b78b1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -93,6 +94,7 @@ public void setup() throws IOException { CONF = TEST_UTIL.getConfiguration(); // Disable block cache. CONF.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); + CONF.setLong(AbstractFSWAL.WAL_SYNC_TIMEOUT_MS, 10000); dir = TEST_UTIL.getDataTestDir("TestHRegion").toString(); tableName = TableName.valueOf(name.getMethodName()); } @@ -258,22 +260,16 @@ public void testLockupAroundBadAssignSync() throws IOException { dodgyWAL.throwSyncException = true; Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("2"), value); + region.rsServices = services; region.put(put); } catch (IOException ioe) { threwOnSync = true; } - // An append in the WAL but the sync failed is a server abort condition. That is our - // current semantic. Verify. It takes a while for abort to be called. Just hang here till it - // happens. If it don't we'll timeout the whole test. That is fine. - while (true) { - try { - verify(services, atLeast(1)).abort(anyString(), any(Throwable.class)); - break; - } catch (WantedButNotInvoked t) { - Threads.sleep(1); - } - } + region.rsServices = null; + // An append in the WAL but the sync failed is a server abort condition. That is our + // current semantic. Verify. 
+ verify(services, atLeast(1)).abort(anyString(), any()); try { dodgyWAL.throwAppendException = false; dodgyWAL.throwSyncException = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index f7e2de16c5d7..03bbbbe47ae6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -147,6 +147,8 @@ import org.apache.hadoop.hbase.regionserver.Region.RowLock; import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; @@ -178,6 +180,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -260,6 +263,7 @@ public void setup() throws IOException { method = name.getMethodName(); tableName = TableName.valueOf(method); CONF.set(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, String.valueOf(0.09)); + CONF.setLong(AbstractFSWAL.WAL_SYNC_TIMEOUT_MS, 10000); } @After @@ -5415,7 +5419,14 @@ public void testPutWithMemStoreFlush() throws Exception { assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0))); } + /** + * For this test,the spied {@link AsyncFSWAL} can not work properly because of a Mockito defect + * that can not deal with classes which have a field of an inner class. See discussions in + * HBASE-15536.When we reuse the code of {@link AsyncFSWAL} for {@link FSHLog}, this test could + * not work for {@link FSHLog} also. + */ @Test + @Ignore public void testDurability() throws Exception { // there are 5 x 5 cases: // table durability(SYNC,FSYNC,ASYC,SKIP,USE_DEFAULT) x mutation @@ -5469,6 +5480,7 @@ private void durabilityTest(String method, Durability tableDurability, Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync, final boolean expectSyncFromLogSyncer) throws Exception { Configuration conf = HBaseConfiguration.create(CONF); + conf.setLong(AbstractFSWAL.WAL_SHUTDOWN_WAIT_TIMEOUT_MS, 60 * 60 * 1000); method = method + "_" + tableDurability.name() + "_" + mutationDurability.name(); byte[] family = Bytes.toBytes("family"); Path logDir = new Path(new Path(dir + method), "log"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java deleted file mode 100644 index 75f6c4868994..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.NavigableMap; -import java.util.TreeMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.regionserver.wal.FSHLog; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.apache.hadoop.hbase.wal.WALProvider.Writer; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.io.Closeables; - -/** - * Testing for lock up of FSHLog. - */ -@Category({ RegionServerTests.class, MediumTests.class }) -public class TestWALLockup { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALLockup.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestWALLockup.class); - - @Rule - public TestName name = new TestName(); - - private static final String COLUMN_FAMILY = "MyCF"; - private static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); - - private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static Configuration CONF; - private String dir; - - // Test names - protected TableName tableName; - - @Before - public void setup() throws IOException { - CONF = TEST_UTIL.getConfiguration(); - // Disable block cache. - CONF.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); - dir = TEST_UTIL.getDataTestDir("TestHRegion").toString(); - tableName = TableName.valueOf(name.getMethodName()); - } - - @After - public void tearDown() throws Exception { - EnvironmentEdgeManagerTestHelper.reset(); - LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir()); - TEST_UTIL.cleanupTestDir(); - } - - private String getName() { - return name.getMethodName(); - } - - // A WAL that we can have throw exceptions when a flag is set. 
- private static final class DodgyFSLog extends FSHLog { - // Set this when want the WAL to start throwing exceptions. - volatile boolean throwException = false; - - // Latch to hold up processing until after another operation has had time to run. - CountDownLatch latch = new CountDownLatch(1); - - public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { - super(fs, root, logDir, conf); - } - - @Override - protected void afterCreatingZigZagLatch() { - // If throwException set, then append will throw an exception causing the WAL to be - // rolled. We'll come in here. Hold up processing until a sync can get in before - // the zigzag has time to complete its setup and get its own sync in. This is what causes - // the lock up we've seen in production. - if (throwException) { - try { - LOG.info("LATCHED"); - // So, timing can have it that the test can run and the bad flush below happens - // before we get here. In this case, we'll be stuck waiting on this latch but there - // is nothing in the WAL pipeline to get us to the below beforeWaitOnSafePoint... - // because all WALs have rolled. In this case, just give up on test. - if (!this.latch.await(5, TimeUnit.SECONDS)) { - LOG.warn("GIVE UP! Failed waiting on latch...Test is ABORTED!"); - } - } catch (InterruptedException e) { - } - } - } - - @Override - protected void beforeWaitOnSafePoint() { - if (throwException) { - LOG.info("COUNTDOWN"); - // Don't countdown latch until someone waiting on it otherwise, the above - // afterCreatingZigZagLatch will get to the latch and no one will ever free it and we'll - // be stuck; test won't go down - while (this.latch.getCount() <= 0) - Threads.sleep(1); - this.latch.countDown(); - } - } - - @Override - protected Writer createWriterInstance(Path path) throws IOException { - final Writer w = super.createWriterInstance(path); - return new Writer() { - @Override - public void close() throws IOException { - w.close(); - } - - @Override - public void sync(boolean forceSync) throws IOException { - if (throwException) { - throw new IOException("FAKE! Failed to replace a bad datanode...SYNC"); - } - w.sync(forceSync); - } - - @Override - public void append(Entry entry) throws IOException { - if (throwException) { - throw new IOException("FAKE! Failed to replace a bad datanode...APPEND"); - } - w.append(entry); - } - - @Override - public long getLength() { - return w.getLength(); - } - - @Override - public long getSyncedLength() { - return w.getSyncedLength(); - } - }; - } - } - - /** - * Reproduce locking up that happens when we get an inopportune sync during setup for zigzaglatch - * wait. See HBASE-14317. If below is broken, we will see this test timeout because it is locked - * up. - *
- * First I need to set up some mocks for Server and RegionServerServices. I also need to set up a - * dodgy WAL that will throw an exception when we go to append to it. - */ - @Test - public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { - // Mocked up server and regionserver services. Needed below. - RegionServerServices services = Mockito.mock(RegionServerServices.class); - Mockito.when(services.getConfiguration()).thenReturn(CONF); - Mockito.when(services.isStopped()).thenReturn(false); - Mockito.when(services.isAborted()).thenReturn(false); - - // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL, go ahead with test. - FileSystem fs = FileSystem.get(CONF); - Path rootDir = new Path(dir + getName()); - DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF); - dodgyWAL.init(); - Path originalWAL = dodgyWAL.getCurrentFileName(); - // I need a log roller running. - LogRoller logRoller = new LogRoller(services); - logRoller.addWAL(dodgyWAL); - // There is no 'stop' once a logRoller is running.. it just dies. - logRoller.start(); - // Now get a region and start adding in edits. - final HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL); - byte[] bytes = Bytes.toBytes(getName()); - NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - scopes.put(COLUMN_FAMILY_BYTES, 0); - MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); - try { - // First get something into memstore. Make a Put and then pull the Cell out of it. Will - // manage append and sync carefully in below to manufacture hang. We keep adding same - // edit. WAL subsystem doesn't care. - Put put = new Put(bytes); - put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes); - WALKeyImpl key = new WALKeyImpl(region.getRegionInfo().getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime(), mvcc, scopes); - WALEdit edit = new WALEdit(); - CellScanner CellScanner = put.cellScanner(); - assertTrue(CellScanner.advance()); - edit.add(CellScanner.current()); - // Put something in memstore and out in the WAL. Do a big number of appends so we push - // out other side of the ringbuffer. If small numbers, stuff doesn't make it to WAL - for (int i = 0; i < 1000; i++) { - region.put(put); - } - // Set it so we start throwing exceptions. - LOG.info("SET throwing of exception on append"); - dodgyWAL.throwException = true; - // This append provokes a WAL roll request - dodgyWAL.appendData(region.getRegionInfo(), key, edit); - boolean exception = false; - try { - dodgyWAL.sync(false); - } catch (Exception e) { - exception = true; - } - assertTrue("Did not get sync exception", exception); - - // Get a memstore flush going too so we have same hung profile as up in the issue over - // in HBASE-14317. Flush hangs trying to get sequenceid because the ringbuffer is held up - // by the zigzaglatch waiting on syncs to come home. - Thread t = new Thread("Flusher") { - @Override - public void run() { - try { - if (region.getMemStoreDataSize() <= 0) { - throw new IOException("memstore size=" + region.getMemStoreDataSize()); - } - region.flush(false); - } catch (IOException e) { - // Can fail trying to flush in middle of a roll. Not a failure. Will succeed later - // when roll completes. 
- LOG.info("In flush", e); - } - LOG.info("Exiting"); - } - }; - t.setDaemon(true); - t.start(); - // Wait until - while (dodgyWAL.latch.getCount() > 0) { - Threads.sleep(1); - } - // Now assert I got a new WAL file put in place even though loads of errors above. - assertTrue(originalWAL != dodgyWAL.getCurrentFileName()); - // Can I append to it? - dodgyWAL.throwException = false; - try { - region.put(put); - } catch (Exception e) { - LOG.info("In the put", e); - } - } finally { - // To stop logRoller, its server has to say it is stopped. - Mockito.when(services.isStopped()).thenReturn(true); - Closeables.close(logRoller, true); - try { - if (region != null) { - region.close(); - } - if (dodgyWAL != null) { - dodgyWAL.close(); - } - } catch (Exception e) { - LOG.info("On way out", e); - } - } - } - - /** - * If below is broken, we will see this test timeout because RingBufferEventHandler was stuck in - * attainSafePoint. Everyone will wait for sync to finish forever. See HBASE-14317. - */ - @Test - public void testRingBufferEventHandlerStuckWhenSyncFailed() - throws IOException, InterruptedException { - - // A WAL that we can have throw exceptions and slow FSHLog.replaceWriter down - class DodgyFSLog extends FSHLog { - - private volatile boolean zigZagCreated = false; - - public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { - super(fs, root, logDir, conf); - } - - @Override - protected void afterCreatingZigZagLatch() { - zigZagCreated = true; - // Sleep a while to wait for RingBufferEventHandler to get stuck first. - try { - Thread.sleep(3000); - } catch (InterruptedException ignore) { - } - } - - @Override - protected long getSequenceOnRingBuffer() { - return super.getSequenceOnRingBuffer(); - } - - protected void publishSyncOnRingBufferAndBlock(long sequence) { - try { - super.blockOnSync(super.publishSyncOnRingBuffer(sequence, false)); - Assert.fail("Expect an IOException here."); - } catch (IOException ignore) { - // Here, we will get an IOException. - } - } - - @Override - protected Writer createWriterInstance(Path path) throws IOException { - final Writer w = super.createWriterInstance(path); - return new Writer() { - @Override - public void close() throws IOException { - w.close(); - } - - @Override - public void sync(boolean forceSync) throws IOException { - throw new IOException("FAKE! Failed to replace a bad datanode...SYNC"); - } - - @Override - public void append(Entry entry) throws IOException { - w.append(entry); - } - - @Override - public long getLength() { - return w.getLength(); - } - - @Override - public long getSyncedLength() { - return w.getSyncedLength(); - } - }; - } - } - - // Mocked up server and regionserver services. Needed below. - RegionServerServices services = Mockito.mock(RegionServerServices.class); - Mockito.when(services.getConfiguration()).thenReturn(CONF); - Mockito.when(services.isStopped()).thenReturn(false); - Mockito.when(services.isAborted()).thenReturn(false); - - // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL, go ahead with test. - FileSystem fs = FileSystem.get(CONF); - Path rootDir = new Path(dir + getName()); - final DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF); - // I need a log roller running. - LogRoller logRoller = new LogRoller(services); - logRoller.addWAL(dodgyWAL); - // There is no 'stop' once a logRoller is running.. it just dies. 
- logRoller.start(); - - try { - final long seqForSync = dodgyWAL.getSequenceOnRingBuffer(); - - // This call provokes a WAL roll, and we will get a new RingBufferEventHandler.ZigZagLatch - // in LogRoller. - // After creating ZigZagLatch, RingBufferEventHandler would get stuck due to sync event, - // as long as HBASE-14317 hasn't be fixed. - LOG.info("Trigger log roll for creating a ZigZagLatch."); - logRoller.requestRollAll(); - - while (!dodgyWAL.zigZagCreated) { - Thread.sleep(10); - } - - // Send a sync event for RingBufferEventHandler, - // and it gets blocked in RingBufferEventHandler.attainSafePoint - LOG.info("Send sync for RingBufferEventHandler"); - Thread syncThread = new Thread() { - @Override - public void run() { - dodgyWAL.publishSyncOnRingBufferAndBlock(seqForSync); - } - }; - // Sync in another thread to avoid reset SyncFuture again. - syncThread.start(); - syncThread.join(); - - try { - LOG.info("Call sync for testing whether RingBufferEventHandler is hanging."); - dodgyWAL.sync(false); // Should not get a hang here, otherwise we will see timeout in this - // test. - Assert.fail("Expect an IOException here."); - } catch (IOException ignore) { - } - - } finally { - // To stop logRoller, its server has to say it is stopped. - Mockito.when(services.isStopped()).thenReturn(true); - if (logRoller != null) { - logRoller.close(); - } - if (dodgyWAL != null) { - dodgyWAL.close(); - } - } - } - - /** - * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} - * when done. - */ - private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - Configuration conf, WAL wal) throws IOException { - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, - MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, - Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index ecd518631b0d..2e7c97ef4de4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -545,7 +545,7 @@ private AbstractFSWAL createHoldingWAL(String testName, AtomicBoolean startHo CountDownLatch holdAppend) throws IOException { AbstractFSWAL wal = newWAL(FS, CommonFSUtils.getRootDir(CONF), testName, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); - wal.init(); + // newWAL has already called wal.init() wal.registerWALActionsListener(new WALActionsListener() { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index 5b3cbfa3b1a3..07a97a1e0e97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import java.io.IOException; import 
java.lang.reflect.Field; @@ -29,7 +27,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -37,7 +34,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; @@ -53,10 +49,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -113,14 +107,9 @@ public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldExcepti HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); log.init(); try { - Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler"); - ringBufferEventHandlerField.setAccessible(true); - FSHLog.RingBufferEventHandler ringBufferEventHandler = - (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log); - Field syncRunnerIndexField = - FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex"); + Field syncRunnerIndexField = FSHLog.class.getDeclaredField("syncRunnerIndex"); syncRunnerIndexField.setAccessible(true); - syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1); + syncRunnerIndexField.set(log, Integer.MAX_VALUE - 1); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build(); @@ -138,93 +127,6 @@ public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldExcepti } } - /** - * Test for WAL stall due to sync future overwrites. See HBASE-25984. - */ - @Test - public void testDeadlockWithSyncOverwrites() throws Exception { - final CountDownLatch blockBeforeSafePoint = new CountDownLatch(1); - - class FailingWriter implements WALProvider.Writer { - @Override - public void sync(boolean forceSync) throws IOException { - throw new IOException("Injected failure.."); - } - - @Override - public void append(WAL.Entry entry) throws IOException { - } - - @Override - public long getLength() { - return 0; - } - - @Override - public long getSyncedLength() { - return 0; - } - - @Override - public void close() throws IOException { - } - } - - /* - * Custom FSHLog implementation with a conditional wait before attaining safe point. 
- */ - class CustomFSHLog extends FSHLog { - public CustomFSHLog(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix) throws IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); - } - - @Override - protected void beforeWaitOnSafePoint() { - try { - assertTrue(blockBeforeSafePoint.await(TEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - public SyncFuture publishSyncOnRingBuffer() { - long sequence = getSequenceOnRingBuffer(); - return publishSyncOnRingBuffer(sequence, false); - } - } - - final String name = this.name.getMethodName(); - try (CustomFSHLog log = new CustomFSHLog(FS, CommonFSUtils.getRootDir(CONF), name, - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null)) { - log.setWriter(new FailingWriter()); - Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler"); - ringBufferEventHandlerField.setAccessible(true); - FSHLog.RingBufferEventHandler ringBufferEventHandler = - (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log); - // Force a safe point - FSHLog.SafePointZigZagLatch latch = ringBufferEventHandler.attainSafePoint(); - try { - SyncFuture future0 = log.publishSyncOnRingBuffer(); - // Wait for the sync to be done. - Waiter.waitFor(CONF, TEST_TIMEOUT_MS, future0::isDone); - // Publish another sync from the same thread, this should not overwrite the done sync. - SyncFuture future1 = log.publishSyncOnRingBuffer(); - assertFalse(future1.isDone()); - // Unblock the safe point trigger.. - blockBeforeSafePoint.countDown(); - // Wait for the safe point to be reached. - // With the deadlock in HBASE-25984, this is never possible, thus blocking the sync - // pipeline. - Waiter.waitFor(CONF, TEST_TIMEOUT_MS, latch::isSafePointAttained); - } finally { - // Force release the safe point, for the clean up. 
- latch.releaseSafePoint(); - } - } - } - /** * Test case for https://issues.apache.org/jira/browse/HBASE-16721 */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 09c19dde65f8..90f595003cb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -17,8 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver.wal; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; @@ -52,21 +57,29 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.ipc.RemoteException; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; + /** * Tests for conditions that should trigger RegionServer aborts when rolling the current WAL fails. 
*/ +@RunWith(Parameterized.class) @Category({ RegionServerTests.class, MediumTests.class }) public class TestLogRollAbort { @@ -103,14 +116,23 @@ public static void setUpBeforeClass() throws Exception { // the namenode might still try to choose the recently-dead datanode // for a pipeline, so try to a new pipeline multiple times TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 10); - TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "asyncfs"); + } + + @Parameters(name = "{index}: walProvider={0}") + public static List params() { + return Arrays.asList(new Object[] { "filesystem" }, new Object[] { "asyncfs" }); } private Configuration conf; private FileSystem fs; + @Parameter + public String walProvider; + @Before public void setUp() throws Exception { + TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, walProvider); TEST_UTIL.startMiniCluster(2); cluster = TEST_UTIL.getHBaseCluster(); @@ -211,7 +233,7 @@ public void testLogRollAfterSplitStart() throws IOException { } // Send the data to HDFS datanodes and close the HDFS writer log.sync(); - ((AbstractFSWAL) log).replaceWriter(((FSHLog) log).getOldPath(), null, null); + closeWriter((AbstractFSWAL) log); // code taken from MasterFileSystem.getLogDirs(), which is called from // MasterFileSystem.splitLog() handles RS shutdowns (as observed by the splitting process) @@ -226,16 +248,13 @@ public void testLogRollAfterSplitStart() throws IOException { WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals); LOG.debug("Trying to roll the WAL."); - try { - log.rollWriter(); - Assert.fail("rollWriter() did not throw any exception."); - } catch (IOException ioe) { - if (ioe.getCause() instanceof FileNotFoundException) { - LOG.info("Got the expected exception: ", ioe.getCause()); - } else { - Assert.fail("Unexpected exception: " + ioe); - } + IOException error = assertThrows(IOException.class, () -> log.rollWriter()); + if (error instanceof RemoteException) { + error = ((RemoteException) error).unwrapRemoteException(); } + assertTrue("unexpected error: " + Throwables.getStackTraceAsString(error), + error instanceof FileNotFoundException + || error.getCause() instanceof FileNotFoundException); } finally { wals.close(); if (fs.exists(thisTestsDir)) { @@ -243,4 +262,14 @@ public void testLogRollAfterSplitStart() throws IOException { } } } + + private void closeWriter(AbstractFSWAL wal) { + wal.waitForSafePoint(); + long oldFileLen = wal.writer.getLength(); + wal.closeWriter(wal.writer, wal.getOldPath()); + wal.logRollAndSetupWalProps(wal.getOldPath(), null, oldFileLen); + wal.writer = null; + wal.onWriterReplaced(null); + wal.rollRequested.set(false); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index c098140fbe93..f07a02cb25d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -319,108 +319,131 @@ void batchWriteAndWait(Table table, final FSHLog log, int start, boolean expect, */ @Test public void testLogRollOnDatanodeDeath() throws Exception { - TEST_UTIL.ensureSomeRegionServersAvailable(2); - assertTrue("This test requires WAL file replication set to 2.", - fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2); - 
LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); - this.server = cluster.getRegionServer(0); + Long oldValue = TEST_UTIL.getConfiguration() + .getLong("hbase.regionserver.hlog.check.lowreplication.interval", -1); - // Create the test table and open it - TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + try { + /** + * When we reuse the code of AsyncFSWAL to FSHLog, the low replication is only checked by + * {@link LogRoller#checkLowReplication},so in order to make this test spend less time,we + * should minimize following config which is maximized by + * {@link AbstractTestLogRolling#setUpBeforeClass} + */ + TEST_UTIL.getConfiguration().setLong("hbase.regionserver.hlog.check.lowreplication.interval", + 1000); + this.tearDown(); + this.setUp(); + + TEST_UTIL.ensureSomeRegionServersAvailable(2); + assertTrue("This test requires WAL file replication set to 2.", + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2); + LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); - admin.createTable(desc); - Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); + this.server = cluster.getRegionServer(0); + + // Create the test table and open it + TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + + admin.createTable(desc); + Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); - server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); - RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo(); - final FSHLog log = (FSHLog) server.getWAL(region); - final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false); - - log.registerWALActionsListener(new WALActionsListener() { - @Override - public void logRollRequested(WALActionsListener.RollRequestReason reason) { - switch (reason) { - case LOW_REPLICATION: - lowReplicationHookCalled.lazySet(true); - break; - default: - break; + server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); + RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo(); + final FSHLog log = (FSHLog) server.getWAL(region); + final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false); + + log.registerWALActionsListener(new WALActionsListener() { + @Override + public void logRollRequested(WALActionsListener.RollRequestReason reason) { + switch (reason) { + case LOW_REPLICATION: + lowReplicationHookCalled.lazySet(true); + break; + default: + break; + } + } + }); + + // add up the datanode count, to ensure proper replication when we kill 1 + // This function is synchronous; when it returns, the dfs cluster is active + // We start 3 servers and then stop 2 to avoid a directory naming conflict + // when we stop/start a namenode later, as mentioned in HBASE-5163 + List existingNodes = dfsCluster.getDataNodes(); + int numDataNodes = 3; + TEST_UTIL.getConfiguration().setLong("hbase.regionserver.hlog.check.lowreplication.interval", + 1000); + dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, null, null); + List allNodes = dfsCluster.getDataNodes(); + for (int i = allNodes.size() - 1; i >= 0; i--) { + if (existingNodes.contains(allNodes.get(i))) { + dfsCluster.stopDataNode(i); } } - }); - - // add up the 
datanode count, to ensure proper replication when we kill 1 - // This function is synchronous; when it returns, the dfs cluster is active - // We start 3 servers and then stop 2 to avoid a directory naming conflict - // when we stop/start a namenode later, as mentioned in HBASE-5163 - List existingNodes = dfsCluster.getDataNodes(); - int numDataNodes = 3; - dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, null, null); - List allNodes = dfsCluster.getDataNodes(); - for (int i = allNodes.size() - 1; i >= 0; i--) { - if (existingNodes.contains(allNodes.get(i))) { - dfsCluster.stopDataNode(i); - } - } - assertTrue( - "DataNodes " + dfsCluster.getDataNodes().size() + " default replication " - + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()), - dfsCluster.getDataNodes().size() - >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1); - - writeData(table, 2); - - long curTime = EnvironmentEdgeManager.currentTime(); - LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName()); - long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); - assertTrue("Log should have a timestamp older than now", - curTime > oldFilenum && oldFilenum != -1); - - assertTrue("The log shouldn't have rolled yet", - oldFilenum == AbstractFSWALProvider.extractFileNumFromWAL(log)); - final DatanodeInfo[] pipeline = log.getPipeline(); - assertTrue(pipeline.length == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); - - // kill a datanode in the pipeline to force a log roll on the next sync() - // This function is synchronous, when it returns the node is killed. - assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null); - - // this write should succeed, but trigger a log roll - writeData(table, 2); - long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); - - assertTrue("Missing datanode should've triggered a log roll", - newFilenum > oldFilenum && newFilenum > curTime); - - assertTrue("The log rolling hook should have been called with the low replication flag", - lowReplicationHookCalled.get()); - - // write some more log data (this should use a new hdfs_out) - writeData(table, 3); - assertTrue("The log should not roll again.", - AbstractFSWALProvider.extractFileNumFromWAL(log) == newFilenum); - // kill another datanode in the pipeline, so the replicas will be lower than - // the configured value 2. - assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null); - - batchWriteAndWait(table, log, 3, false, 14000); - int replication = log.getLogReplication(); - assertTrue("LowReplication Roller should've been disabled, current replication=" + replication, - !log.isLowReplicationRollEnabled()); - - dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null, null); - - // Force roll writer. The new log file will have the default replications, - // and the LowReplication Roller will be enabled. 
- log.rollWriter(true); - batchWriteAndWait(table, log, 13, true, 10000); - replication = log.getLogReplication(); - assertTrue("New log file should have the default replication instead of " + replication, - replication == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); - assertTrue("LowReplication Roller should've been enabled", log.isLowReplicationRollEnabled()); + assertTrue( + "DataNodes " + dfsCluster.getDataNodes().size() + " default replication " + + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()), + dfsCluster.getDataNodes().size() + >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1); + + writeData(table, 2); + + long curTime = EnvironmentEdgeManager.currentTime(); + LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName()); + long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); + assertTrue("Log should have a timestamp older than now", + curTime > oldFilenum && oldFilenum != -1); + + assertTrue("The log shouldn't have rolled yet", + oldFilenum == AbstractFSWALProvider.extractFileNumFromWAL(log)); + final DatanodeInfo[] pipeline = log.getPipeline(); + assertTrue(pipeline.length == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); + + // kill a datanode in the pipeline to force a log roll on the next sync() + // This function is synchronous, when it returns the node is killed. + assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null); + + // this write should succeed, but trigger a log roll + writeData(table, 2); + + TEST_UTIL.waitFor(10000, 100, () -> { + long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); + return newFilenum > oldFilenum && newFilenum > curTime && lowReplicationHookCalled.get(); + }); + + long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); + + // write some more log data (this should use a new hdfs_out) + writeData(table, 3); + assertTrue("The log should not roll again.", + AbstractFSWALProvider.extractFileNumFromWAL(log) == newFilenum); + // kill another datanode in the pipeline, so the replicas will be lower than + // the configured value 2. + assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null); + + batchWriteAndWait(table, log, 3, false, 14000); + int replication = log.getLogReplication(); + assertTrue( + "LowReplication Roller should've been disabled, current replication=" + replication, + !log.isLowReplicationRollEnabled()); + + dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null, null); + + // Force roll writer. The new log file will have the default replications, + // and the LowReplication Roller will be enabled. 
+ log.rollWriter(true); + batchWriteAndWait(table, log, 13, true, 10000); + replication = log.getLogReplication(); + assertTrue("New log file should have the default replication instead of " + replication, + replication == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); + assertTrue("LowReplication Roller should've been enabled", log.isLowReplicationRollEnabled()); + } finally { + TEST_UTIL.getConfiguration().setLong("hbase.regionserver.hlog.check.lowreplication.interval", + oldValue); + } } /** diff --git a/pom.xml b/pom.xml index 7958e5c221f2..0382e2ecf4fa 100644 --- a/pom.xml +++ b/pom.xml @@ -819,7 +819,7 @@ 2.11.0 3.9 3.6.1 - 3.4.2 + 3.4.4 4.5.13 4.4.13 3.2.6 From 2c92e6fdce85cb02a4a5c2ecb714fd2efa30835c Mon Sep 17 00:00:00 2001 From: Dimitrios Efthymiou Date: Mon, 17 Jul 2023 07:03:01 +0100 Subject: [PATCH 028/514] HBASE-27906 Fix the javadoc for SyncFutureCache (#5325) --- .../regionserver/wal/SyncFutureCache.java | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java index 986b9ca036d3..f842cc3dabe5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java @@ -27,12 +27,20 @@ /** * A cache of {@link SyncFuture}s. This class supports two methods - * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer()}. Usage pattern: - * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); sf.reset(...); // Use the sync future - * finally: syncFutureCache.offer(sf); Offering the sync future back to the cache makes it eligible - * for reuse within the same thread context. Cache keyed by the accessing thread instance and - * automatically invalidated if it remains unused for - * {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. + * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer(SyncFuture)}}. + *

+ * Usage pattern:
+ *
+ * <pre>
+ *   SyncFuture sf = syncFutureCache.getIfPresentOrNew();
+ *   sf.reset(...);
+ *   // Use the sync future
+ *   finally: syncFutureCache.offer(sf);
+ * </pre>
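For context, the pattern above written out as a compilable sketch. SyncFutureCache and SyncFuture are IA.Private classes in org.apache.hadoop.hbase.regionserver.wal, so this only compiles from that package; the wrapper class name is made up, the Configuration-taking constructor is assumed from the current class, and the reset(...) arguments stay elided as in the javadoc.

package org.apache.hadoop.hbase.regionserver.wal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class SyncFutureCacheSketch {
  static void syncOnce() {
    Configuration conf = HBaseConfiguration.create();
    SyncFutureCache cache = new SyncFutureCache(conf);
    // Borrow the future cached for this thread, or create a new one.
    SyncFuture future = cache.getIfPresentOrNew();
    try {
      // future.reset(...), then wait on it for the WAL sync to complete.
    } finally {
      // Offer it back so the same thread can reuse it on its next sync.
      cache.offer(future);
    }
  }
}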
+ * + * Offering the sync future back to the cache makes it eligible for reuse within the same thread + * context. Cache keyed by the accessing thread instance and automatically invalidated if it remains + * unused for {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. */ @InterfaceAudience.Private public final class SyncFutureCache { From 73ea43f5d7ffc42e8b8a3e8396653dc7c7d73bf0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 22 Jul 2023 21:24:49 +0800 Subject: [PATCH 029/514] HBASE-27984 NPE in MigrateReplicationQueueFromZkToTableProcedure recovery (#5329) Signed-off-by: GeorryHuang --- .../java/org/apache/hadoop/hbase/master/HMaster.java | 9 +++++++++ .../org/apache/hadoop/hbase/master/MasterServices.java | 7 +++++++ .../hbase/master/replication/AddPeerProcedure.java | 6 +++--- .../MigrateReplicationQueueFromZkToTableProcedure.java | 8 ++++---- .../hbase/master/replication/ReplicationPeerManager.java | 8 -------- .../hbase/replication/master/ReplicationLogCleaner.java | 8 +++++--- .../hadoop/hbase/master/MockNoopMasterServices.java | 6 ++++++ .../hadoop/hbase/master/cleaner/TestLogsCleaner.java | 3 ++- ...estMigrateReplicationQueueFromZkToTableProcedure.java | 4 ++-- .../replication/master/TestReplicationLogCleaner.java | 4 ++-- 10 files changed, 40 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8cb40cb58803..1c77e8dfaafa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -229,6 +229,7 @@ import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration; import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; +import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier; import org.apache.hadoop.hbase.replication.master.ReplicationSinkTrackerTableCreator; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.ReplicationSyncUpToolInfo; @@ -364,6 +365,9 @@ public class HMaster extends HBaseServerBase implements Maste private RSGroupInfoManager rsGroupInfoManager; + private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier = + new ReplicationLogCleanerBarrier(); + // manager of replication private ReplicationPeerManager replicationPeerManager; @@ -4106,6 +4110,11 @@ public ReplicationPeerManager getReplicationPeerManager() { return replicationPeerManager; } + @Override + public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { + return replicationLogCleanerBarrier; + } + public HashMap>> getReplicationLoad(ServerName[] serverNames) { List peerList = this.getReplicationPeerManager().listPeers(null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 1958e64767eb..d450fbb45ac0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import 
org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier; import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; @@ -361,6 +362,12 @@ ReplicationPeerConfig getReplicationPeerConfig(String peerId) */ ReplicationPeerManager getReplicationPeerManager(); + /** + * Returns the {@link ReplicationLogCleanerBarrier}. It will be used at multiple places so we put + * it in MasterServices directly. + */ + ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier(); + /** * Returns the {@link SyncReplicationReplayWALManager}. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java index 1d02fab5f194..c469896d3e7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java @@ -86,7 +86,7 @@ protected ReplicationPeerConfig getNewPeerConfig() { @Override protected void releaseLatch(MasterProcedureEnv env) { if (cleanerDisabled) { - env.getReplicationPeerManager().getReplicationLogCleanerBarrier().enable(); + env.getMasterServices().getReplicationLogCleanerBarrier().enable(); } if (peerConfig.isSyncReplication()) { env.getReplicationPeerManager().releaseSyncReplicationPeerLock(); @@ -97,7 +97,7 @@ protected void releaseLatch(MasterProcedureEnv env) { @Override protected void prePeerModification(MasterProcedureEnv env) throws IOException, ReplicationException, ProcedureSuspendedException { - if (!env.getReplicationPeerManager().getReplicationLogCleanerBarrier().disable()) { + if (!env.getMasterServices().getReplicationLogCleanerBarrier().disable()) { throw suspend(env.getMasterConfiguration(), backoff -> LOG.warn("LogCleaner is run at the same time when adding peer {}, sleep {} secs", peerId, backoff / 1000)); @@ -142,7 +142,7 @@ protected void afterReplay(MasterProcedureEnv env) { // when executing the procedure we will try to disable and acquire. 
return; } - if (!env.getReplicationPeerManager().getReplicationLogCleanerBarrier().disable()) { + if (!env.getMasterServices().getReplicationLogCleanerBarrier().disable()) { throw new IllegalStateException("can not disable log cleaner, this should not happen"); } cleanerDisabled = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java index b7c4e33ef858..c88d613e5260 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java @@ -115,7 +115,7 @@ private void shutdownExecutorService() { private void disableReplicationLogCleaner(MasterProcedureEnv env) throws ProcedureSuspendedException { - if (!env.getReplicationPeerManager().getReplicationLogCleanerBarrier().disable()) { + if (!env.getMasterServices().getReplicationLogCleanerBarrier().disable()) { // it is not likely that we can reach here as we will schedule this procedure immediately // after master restarting, where ReplicationLogCleaner should have not started its first run // yet. But anyway, let's make the code more robust. And it is safe to wait a bit here since @@ -130,7 +130,7 @@ private void disableReplicationLogCleaner(MasterProcedureEnv env) } private void enableReplicationLogCleaner(MasterProcedureEnv env) { - env.getReplicationPeerManager().getReplicationLogCleanerBarrier().enable(); + env.getMasterServices().getReplicationLogCleanerBarrier().enable(); } private void waitUntilNoPeerProcedure(MasterProcedureEnv env) throws ProcedureSuspendedException { @@ -224,7 +224,7 @@ protected Flow executeFromState(MasterProcedureEnv env, lockEntry = procLock.getLockEntry(getProcId()); } catch (IOException ioe) { LOG.error("Error while acquiring execution lock for procedure {}" - + " when trying to wake it up, aborting...", ioe); + + " when trying to wake it up, aborting...", this, ioe); env.getMasterServices().abort("Can not acquire procedure execution lock", e); return; } @@ -304,7 +304,7 @@ protected void afterReplay(MasterProcedureEnv env) { // when executing the procedure we will try to disable and acquire. 
return; } - if (!env.getReplicationPeerManager().getReplicationLogCleanerBarrier().disable()) { + if (!env.getMasterServices().getReplicationLogCleanerBarrier().disable()) { throw new IllegalStateException("can not disable log cleaner, this should not happen"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 8b01225e553e..53a7a6f00146 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration.MigrationIterator; import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration.ZkLastPushedSeqId; import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration.ZkReplicationQueueData; -import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -115,9 +114,6 @@ public class ReplicationPeerManager implements ConfigurationObserver { // Only allow to add one sync replication peer concurrently private final Semaphore syncReplicationPeerLock = new Semaphore(1); - private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier = - new ReplicationLogCleanerBarrier(); - private final String clusterId; private volatile Configuration conf; @@ -725,10 +721,6 @@ public void releaseSyncReplicationPeerLock() { syncReplicationPeerLock.release(); } - public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { - return replicationLogCleanerBarrier; - } - @Override public void onConfigurationChange(Configuration conf) { this.conf = conf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 3ab52da6158e..6ebcac7e453a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -65,6 +65,7 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate { // queue for a given peer, that why we can use a String peerId as key instead of // ReplicationQueueId. 
private Map>> replicationOffsets; + private ReplicationLogCleanerBarrier barrier; private ReplicationPeerManager rpm; private Supplier> getNotFullyDeadServers; @@ -84,7 +85,7 @@ public void preClean() { LOG.error("Error occurred while executing queueStorage.hasData()", e); return; } - canFilter = rpm.getReplicationLogCleanerBarrier().start(); + canFilter = barrier.start(); if (canFilter) { notFullyDeadServers = getNotFullyDeadServers.get(); peerIds = rpm.listPeers(null).stream().map(ReplicationPeerDescription::getPeerId) @@ -98,7 +99,7 @@ public void preClean() { allQueueData = rpm.getQueueStorage().listAllQueues(); } catch (ReplicationException e) { LOG.error("Can not list all replication queues, give up cleaning", e); - rpm.getReplicationLogCleanerBarrier().stop(); + barrier.stop(); canFilter = false; notFullyDeadServers = null; peerIds = null; @@ -122,7 +123,7 @@ public void preClean() { @Override public void postClean() { if (canFilter) { - rpm.getReplicationLogCleanerBarrier().stop(); + barrier.stop(); canFilter = false; // release memory notFullyDeadServers = null; @@ -244,6 +245,7 @@ public void init(Map params) { Object master = params.get(HMaster.MASTER); if (master != null && master instanceof MasterServices) { MasterServices m = (MasterServices) master; + barrier = m.getReplicationLogCleanerBarrier(); rpm = m.getReplicationPeerManager(); getNotFullyDeadServers = () -> getNotFullyDeadServers(m); return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 26f6ac512f2f..d526358ceb4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier; import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; @@ -524,4 +525,9 @@ public boolean replicationPeerModificationSwitch(boolean on) throws IOException public boolean isReplicationPeerModificationEnabled() { return false; } + + @Override + public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 5d474bc21640..699d9f963da7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -132,10 +132,11 @@ public void beforeTest() throws Exception { masterServices = mock(MasterServices.class); when(masterServices.getConnection()).thenReturn(TEST_UTIL.getConnection()); + when(masterServices.getReplicationLogCleanerBarrier()) + .thenReturn(new ReplicationLogCleanerBarrier()); ReplicationPeerManager rpm = mock(ReplicationPeerManager.class); when(masterServices.getReplicationPeerManager()).thenReturn(rpm); when(rpm.getQueueStorage()).thenReturn(queueStorage); - 
when(rpm.getReplicationLogCleanerBarrier()).thenReturn(new ReplicationLogCleanerBarrier()); when(rpm.listPeers(null)).thenReturn(new ArrayList<>()); ServerManager sm = mock(ServerManager.class); when(masterServices.getServerManager()).thenReturn(sm); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueueFromZkToTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueueFromZkToTableProcedure.java index cb795edcd623..a2709548bc3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueueFromZkToTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueueFromZkToTableProcedure.java @@ -214,8 +214,8 @@ public void testDisablePeerAndWaitStates() throws Exception { EXTRA_REGION_SERVERS .put(ServerName.valueOf("localhost", 54321, EnvironmentEdgeManager.currentTime()), metrics); - ReplicationLogCleanerBarrier barrier = UTIL.getHBaseCluster().getMaster() - .getReplicationPeerManager().getReplicationLogCleanerBarrier(); + ReplicationLogCleanerBarrier barrier = + UTIL.getHBaseCluster().getMaster().getReplicationLogCleanerBarrier(); assertTrue(barrier.start()); ProcedureExecutor procExec = getMasterProcedureExecutor(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestReplicationLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestReplicationLogCleaner.java index 7edadae03b14..a1850b68eba5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestReplicationLogCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestReplicationLogCleaner.java @@ -80,8 +80,8 @@ public class TestReplicationLogCleaner { @Before public void setUp() throws ReplicationException { services = mock(MasterServices.class); + when(services.getReplicationLogCleanerBarrier()).thenReturn(new ReplicationLogCleanerBarrier()); ReplicationPeerManager rpm = mock(ReplicationPeerManager.class); - when(rpm.getReplicationLogCleanerBarrier()).thenReturn(new ReplicationLogCleanerBarrier()); when(services.getReplicationPeerManager()).thenReturn(rpm); when(rpm.listPeers(null)).thenReturn(new ArrayList<>()); ReplicationQueueStorage rqs = mock(ReplicationQueueStorage.class); @@ -157,7 +157,7 @@ public void testNoConf() { @Test public void testCanNotFilter() { - assertTrue(services.getReplicationPeerManager().getReplicationLogCleanerBarrier().disable()); + assertTrue(services.getReplicationLogCleanerBarrier().disable()); List files = Arrays.asList(new FileStatus()); assertSame(Collections.emptyList(), runCleaner(cleaner, files)); } From aec9db4c9c162c5e7946f9ba2bb191bd7b6c7414 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 24 Jul 2023 07:58:13 -0400 Subject: [PATCH 030/514] HBASE-27553 Add row param to mutation slow logs (#5328) Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../hbase/shaded/protobuf/ProtobufUtil.java | 6 ++-- .../shaded/protobuf/TestProtobufUtil.java | 30 +++++++++++++++++++ 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index a619e39cd6a6..aa3cb39c5971 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2163,7 +2163,8 @@ public static SlowLogParams getSlowLogParams(Message message, boolean slowLogSca } } else if (message instanceof MutationProto) { MutationProto mutationProto = (MutationProto) message; - String params = "type= " + mutationProto.getMutateType().toString(); + String params = "type= " + mutationProto.getMutateType().toString() + ", row= " + + getStringForByteString(mutationProto.getRow()); return new SlowLogParams(params); } else if (message instanceof GetRequest) { GetRequest getRequest = (GetRequest) message; @@ -2182,7 +2183,8 @@ public static SlowLogParams getSlowLogParams(Message message, boolean slowLogSca } else if (message instanceof MutateRequest) { MutateRequest mutateRequest = (MutateRequest) message; String regionName = getStringForByteString(mutateRequest.getRegion().getValue()); - String params = "region= " + regionName; + String params = "region= " + regionName + ", row= " + + getStringForByteString(mutateRequest.getMutation().getRow()); return new SlowLogParams(regionName, params); } else if (message instanceof CoprocessorServiceRequest) { CoprocessorServiceRequest coprocessorServiceRequest = (CoprocessorServiceRequest) message; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index fc442b8998d9..2b4380dfbb6d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.SlowLogParams; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -64,6 +65,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.DeleteType; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -593,4 +595,32 @@ public void testIsEOF() throws Exception { } } } + + @Test + public void testSlowLogParamsMutationProto() { + MutationProto mutationProto = + ClientProtos.MutationProto.newBuilder().setRow(ByteString.copyFromUtf8("row123")).build(); + + SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(mutationProto, false); + + assertTrue(slowLogParams.getParams() + .contains(Bytes.toStringBinary(mutationProto.getRow().toByteArray()))); + } + + @Test + public void testSlowLogParamsMutateRequest() { + MutationProto mutationProto = + ClientProtos.MutationProto.newBuilder().setRow(ByteString.copyFromUtf8("row123")).build(); + ClientProtos.MutateRequest mutateRequest = + ClientProtos.MutateRequest.newBuilder().setMutation(mutationProto) + .setRegion(HBaseProtos.RegionSpecifier.newBuilder() + 
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(ByteString.EMPTY).build()) + .build(); + + SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(mutateRequest, false); + + assertTrue(slowLogParams.getParams() + .contains(Bytes.toStringBinary(mutationProto.getRow().toByteArray()))); + } } From 83ea0da77a344ae84adb5426c346350a3004bcad Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 24 Jul 2023 12:53:33 -0400 Subject: [PATCH 031/514] HBASE-27657 Connection and Request Attributes (#5326) Signed-off-by: Bryan Beaudreault --- .../AsyncAdminRequestRetryingCaller.java | 3 +- .../client/AsyncBatchRpcRetryingCaller.java | 7 +- .../hbase/client/AsyncClientScanner.java | 31 +- .../hbase/client/AsyncConnectionImpl.java | 11 +- .../AsyncMasterRequestRpcRetryingCaller.java | 3 +- .../hbase/client/AsyncRpcRetryingCaller.java | 4 +- .../client/AsyncRpcRetryingCallerFactory.java | 31 +- ...syncScanSingleRegionRpcRetryingCaller.java | 4 +- .../AsyncServerRequestRpcRetryingCaller.java | 3 +- .../AsyncSingleRequestRpcRetryingCaller.java | 6 +- .../hadoop/hbase/client/AsyncTable.java | 10 + .../hbase/client/AsyncTableBuilder.java | 5 + .../hbase/client/AsyncTableBuilderBase.java | 14 + .../hadoop/hbase/client/AsyncTableImpl.java | 6 + .../hbase/client/ConnectionFactory.java | 67 +++- .../client/ConnectionOverAsyncConnection.java | 7 +- .../hbase/client/RawAsyncTableImpl.java | 17 +- .../org/apache/hadoop/hbase/client/Table.java | 8 + .../hadoop/hbase/client/TableBuilder.java | 5 + .../hadoop/hbase/client/TableBuilderBase.java | 14 + .../hbase/client/TableOverAsyncTable.java | 5 + .../hadoop/hbase/ipc/AbstractRpcClient.java | 36 +- .../hadoop/hbase/ipc/BlockingRpcClient.java | 8 +- .../hbase/ipc/BlockingRpcConnection.java | 2 +- .../org/apache/hadoop/hbase/ipc/Call.java | 7 +- .../ipc/DelegatingHBaseRpcController.java | 11 + .../hadoop/hbase/ipc/HBaseRpcController.java | 11 + .../hbase/ipc/HBaseRpcControllerImpl.java | 14 + .../org/apache/hadoop/hbase/ipc/IPCUtil.java | 11 + .../hadoop/hbase/ipc/NettyRpcClient.java | 11 +- .../hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- .../hadoop/hbase/ipc/RpcClientFactory.java | 13 +- .../hadoop/hbase/ipc/RpcConnection.java | 15 +- .../TestRpcBasedRegistryHedgedReads.java | 3 +- .../hbase/ipc/TestTLSHandshadeFailure.java | 4 +- .../mapreduce/TestHFileOutputFormat2.java | 7 +- .../TestMultiTableInputFormatBase.java | 3 +- .../mapreduce/TestTableInputFormatBase.java | 4 +- .../src/main/protobuf/rpc/RPC.proto | 2 + .../client/AsyncClusterConnectionImpl.java | 3 +- .../AsyncRegionReplicationRetryingCaller.java | 4 +- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 3 + .../apache/hadoop/hbase/ipc/ServerCall.java | 6 + .../hadoop/hbase/client/DummyAsyncTable.java | 6 + .../hbase/client/TestClientTimeouts.java | 5 +- .../TestRequestAndConnectionAttributes.java | 317 ++++++++++++++++++ .../hadoop/hbase/ipc/TestRpcClientLeaks.java | 5 +- .../ipc/TestRpcServerSlowConnectionSetup.java | 2 +- .../namequeues/TestNamedQueueRecorder.java | 7 +- .../region/TestRegionProcedureStore.java | 6 + .../thrift2/client/ThriftConnection.java | 9 +- 51 files changed, 712 insertions(+), 86 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index d3bec8b3cfbf..f7fa7e9f03fa 
100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; +import java.util.Collections; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ -44,7 +45,7 @@ public AsyncAdminRequestRetryingCaller(Timer retryTimer, AsyncConnectionImpl con long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, Collections.emptyMap()); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 7a8bbeb9420b..c485a0a2c05c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -114,6 +114,8 @@ class AsyncBatchRpcRetryingCaller { private final HBaseServerExceptionPauseManager pauseManager; + private final Map requestAttributes; + // we can not use HRegionLocation as the map key because the hashCode and equals method of // HRegionLocation only consider serverName. private static final class RegionRequest { @@ -149,7 +151,8 @@ public int getPriority() { public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, TableName tableName, List actions, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, + Map requestAttributes) { this.retryTimer = retryTimer; this.conn = conn; this.tableName = tableName; @@ -180,6 +183,7 @@ public AsyncBatchRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, this.startNs = System.nanoTime(); this.pauseManager = new HBaseServerExceptionPauseManager(pauseNs, pauseNsForServerOverloaded, operationTimeoutNs); + this.requestAttributes = requestAttributes; } private static boolean hasIncrementOrAppend(Row action) { @@ -392,6 +396,7 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr HBaseRpcController controller = conn.rpcControllerFactory.newController(); resetController(controller, Math.min(rpcTimeoutNs, remainingNs), calcPriority(serverReq.getPriority(), tableName)); + controller.setRequestAttributes(requestAttributes); if (!cells.isEmpty()) { controller.setCellScanner(createCellScanner(cells)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java index ed381df7e0da..b61f5b80c9e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java @@ -32,6 +32,7 @@ import io.opentelemetry.api.trace.StatusCode; import 
io.opentelemetry.context.Scope; import java.io.IOException; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -92,9 +93,12 @@ class AsyncClientScanner { private final Span span; + private final Map requestAttributes; + public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableName tableName, AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, + Map requestAttributes) { if (scan.getStartRow() == null) { scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow()); } @@ -113,6 +117,7 @@ public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableN this.rpcTimeoutNs = rpcTimeoutNs; this.startLogErrorsCnt = startLogErrorsCnt; this.resultCache = createScanResultCache(scan); + this.requestAttributes = requestAttributes; if (scan.isScanMetricsEnabled()) { this.scanMetrics = new ScanMetrics(); consumer.onScanMetricsCreated(scanMetrics); @@ -191,15 +196,17 @@ private CompletableFuture callOpenScanner(HBaseRpcControlle } private void startScan(OpenScannerResponse resp) { - addListener(conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()) - .location(resp.loc).remote(resp.isRegionServerRemote) - .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) - .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) - .start(resp.controller, resp.resp), (hasMore, error) -> { + addListener( + conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()).location(resp.loc) + .remote(resp.isRegionServerRemote) + .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) + .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .setRequestAttributes(requestAttributes).start(resp.controller, resp.resp), + (hasMore, error) -> { try (Scope ignored = span.makeCurrent()) { if (error != null) { try { @@ -231,8 +238,8 @@ private CompletableFuture openScanner(int replicaId) { .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner) - .call(); + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .setRequestAttributes(requestAttributes).action(this::callOpenScanner).call(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 3af574cfc0b2..4900581c69ad 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -31,6 +31,8 @@ import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.net.SocketAddress; +import java.util.Collections; +import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -127,6 +129,11 @@ public class AsyncConnectionImpl implements AsyncConnection { public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, SocketAddress localAddress, User user) { + this(conf, registry, clusterId, localAddress, user, Collections.emptyMap()); + } + + public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, + SocketAddress localAddress, User user, Map connectionAttributes) { this.conf = conf; this.user = user; this.metricsScope = MetricsConnection.getScope(conf, clusterId, this); @@ -142,8 +149,8 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri } else { this.metrics = Optional.empty(); } - this.rpcClient = - RpcClientFactory.createClient(conf, clusterId, localAddress, metrics.orElse(null)); + this.rpcClient = RpcClientFactory.createClient(conf, clusterId, localAddress, + metrics.orElse(null), connectionAttributes); this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); this.rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java index c02b80c666ae..42585ea1c919 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; +import java.util.Collections; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ -47,7 +48,7 @@ public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxRetries, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, Collections.emptyMap()); this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index 8b317bfec2c2..c3dd8740854e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; @@ -78,7 +79,7 @@ public abstract class AsyncRpcRetryingCaller { public 
AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int priority, long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, - long rpcTimeoutNs, int startLogErrorsCnt) { + long rpcTimeoutNs, int startLogErrorsCnt, Map requestAttributes) { this.retryTimer = retryTimer; this.conn = conn; this.priority = priority; @@ -89,6 +90,7 @@ public AsyncRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, int pr this.future = new CompletableFuture<>(); this.controller = conn.rpcControllerFactory.newController(); this.controller.setPriority(priority); + this.controller.setRequestAttributes(requestAttributes); this.exceptions = new ArrayList<>(); this.startNs = System.nanoTime(); this.pauseManager = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java index 2d8e7b7aabe9..1ea2a1ad7dd4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java @@ -23,7 +23,9 @@ import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HRegionLocation; @@ -83,6 +85,8 @@ public class SingleRequestCallerBuilder extends BuilderBase { private int priority = PRIORITY_UNSET; + private Map requestAttributes = Collections.emptyMap(); + public SingleRequestCallerBuilder table(TableName tableName) { this.tableName = tableName; return this; @@ -144,6 +148,12 @@ public SingleRequestCallerBuilder priority(int priority) { return this; } + public SingleRequestCallerBuilder + setRequestAttributes(Map requestAttributes) { + this.requestAttributes = requestAttributes; + return this; + } + private void preCheck() { checkArgument(replicaId >= 0, "invalid replica id %s", replicaId); checkNotNull(tableName, "tableName is null"); @@ -157,7 +167,7 @@ public AsyncSingleRequestRpcRetryingCaller build() { preCheck(); return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn, tableName, row, replicaId, locateType, callable, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, requestAttributes); } /** @@ -201,6 +211,8 @@ public class ScanSingleRegionCallerBuilder extends BuilderBase { private int priority = PRIORITY_UNSET; + private Map requestAttributes = Collections.emptyMap(); + public ScanSingleRegionCallerBuilder id(long scannerId) { this.scannerId = scannerId; return this; @@ -278,6 +290,12 @@ public ScanSingleRegionCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) { return this; } + public ScanSingleRegionCallerBuilder + setRequestAttributes(Map requestAttributes) { + this.requestAttributes = requestAttributes; + return this; + } + private void preCheck() { checkArgument(scannerId != null, "invalid scannerId %d", scannerId); checkNotNull(scan, "scan is null"); @@ -293,7 +311,7 @@ public AsyncScanSingleRegionRpcRetryingCaller build() { return new AsyncScanSingleRegionRpcRetryingCaller(retryTimer, conn, scan, scanMetrics, scannerId, resultCache, consumer, 
stub, loc, isRegionServerRemote, priority, scannerLeaseTimeoutPeriodNs, pauseNs, pauseNsForServerOverloaded, maxAttempts, - scanTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + scanTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, requestAttributes); } /** @@ -322,6 +340,8 @@ public class BatchCallerBuilder extends BuilderBase { private long rpcTimeoutNs = -1L; + private Map requestAttributes = Collections.emptyMap(); + public BatchCallerBuilder table(TableName tableName) { this.tableName = tableName; return this; @@ -362,10 +382,15 @@ public BatchCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) { return this; } + public BatchCallerBuilder setRequestAttributes(Map requestAttributes) { + this.requestAttributes = requestAttributes; + return this; + } + public AsyncBatchRpcRetryingCaller build() { return new AsyncBatchRpcRetryingCaller<>(retryTimer, conn, tableName, actions, pauseNs, pauseNsForServerOverloaded, maxAttempts, operationTimeoutNs, rpcTimeoutNs, - startLogErrorsCnt); + startLogErrorsCnt, requestAttributes); } public List> call() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index ca39051de84d..a5d4ef6407e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; @@ -316,7 +317,7 @@ public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionI AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc, boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs, long pauseNsForServerOverloaded, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, - int startLogErrorsCnt) { + int startLogErrorsCnt, Map requestAttributes) { this.retryTimer = retryTimer; this.conn = conn; this.scan = scan; @@ -341,6 +342,7 @@ public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionI this.priority = priority; this.controller = conn.rpcControllerFactory.newController(); this.controller.setPriority(priority); + this.controller.setRequestAttributes(requestAttributes); this.exceptions = new ArrayList<>(); this.pauseManager = new HBaseServerExceptionPauseManager(pauseNs, pauseNsForServerOverloaded, scanTimeoutNs); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java index 40cd3b87e928..d4484ba87bf1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; +import java.util.Collections; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -49,7 +50,7 @@ public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl long pauseNs, long 
pauseNsForServerOverloaded, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseNsForServerOverloaded, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, Collections.emptyMap()); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java index 9c115af97b5b..a0d536aef5f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; +import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.HRegionLocation; @@ -57,9 +58,10 @@ CompletableFuture call(HBaseRpcController controller, HRegionLocation loc, public AsyncSingleRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn, TableName tableName, byte[] row, int replicaId, RegionLocateType locateType, Callable callable, int priority, long pauseNs, long pauseNsForServerOverloaded, - int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { + int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, + Map requestAttributes) { super(retryTimer, conn, priority, pauseNs, pauseNsForServerOverloaded, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, requestAttributes); this.tableName = tableName; this.row = row; this.replicaId = replicaId; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index 3c03444cfbbc..2979c6689884 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -22,9 +22,11 @@ import static org.apache.hadoop.hbase.util.FutureUtils.allOf; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.TableName; @@ -110,6 +112,14 @@ public interface AsyncTable { */ long getScanTimeout(TimeUnit unit); + /** + * Get the map of request attributes + * @return a map of request attributes supplied by the client + */ + default Map getRequestAttributes() { + throw new NotImplementedException("Add an implementation!"); + } + /** * Test for the existence of columns in the table, as specified by the Get. *

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java index f6db89f82bf5..007f7ad48685 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java @@ -137,6 +137,11 @@ default AsyncTableBuilder setMaxRetries(int maxRetries) { */ AsyncTableBuilder setStartLogErrorsCnt(int startLogErrorsCnt); + /** + * Set a request attribute + */ + AsyncTableBuilder setRequestAttribute(String key, byte[] value); + /** * Create the {@link AsyncTable} instance. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index 624d6e1dbb0a..02e9da0770b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -19,6 +19,9 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -50,6 +53,8 @@ abstract class AsyncTableBuilderBase protected int startLogErrorsCnt; + protected Map requestAttributes = Collections.emptyMap(); + AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) { this.tableName = tableName; this.operationTimeoutNs = tableName.isSystemTable() @@ -121,4 +126,13 @@ public AsyncTableBuilderBase setStartLogErrorsCnt(int startLogErrorsCnt) { this.startLogErrorsCnt = startLogErrorsCnt; return this; } + + @Override + public AsyncTableBuilder setRequestAttribute(String key, byte[] value) { + if (this.requestAttributes.isEmpty()) { + this.requestAttributes = new HashMap<>(); + } + this.requestAttributes.put(key, value); + return this; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index e785e587ab36..590ee9bc47a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -24,6 +24,7 @@ import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -101,6 +102,11 @@ public long getScanTimeout(TimeUnit unit) { return rawTable.getScanTimeout(unit); } + @Override + public Map getRequestAttributes() { + return rawTable.getRequestAttributes(); + } + private CompletableFuture wrap(CompletableFuture future) { return FutureUtils.wrapFuture(future, pool); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 4d4559f4b7a9..ac70091dcf65 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.lang.reflect.Constructor; import 
java.security.PrivilegedExceptionAction; +import java.util.Collections; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; @@ -216,21 +218,53 @@ public static Connection createConnection(Configuration conf, User user) throws */ public static Connection createConnection(Configuration conf, ExecutorService pool, final User user) throws IOException { + return createConnection(conf, pool, user, Collections.emptyMap()); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection, meta cache, and connections to + * region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection
+ * instance. Typical usage:
+ *
+ * <pre>
+   * Connection connection = ConnectionFactory.createConnection(conf);
+   * Table table = connection.getTable(TableName.valueOf("table1"));
+   * try {
+   *   table.get(...);
+   *   ...
+   * } finally {
+   *   table.close();
+   *   connection.close();
+   * }
+   * </pre>
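For reference, a minimal sketch tying together the two attribute APIs added by this patch: the createConnection overload that takes a connection-attribute map, and TableBuilder.setRequestAttribute for per-request attributes. The wrapper class, attribute keys, table name and row key are invented for illustration, and passing null for the pool is assumed to fall back to the connection's internal batch pool, as the ConnectionOverAsyncConnection change later in this patch suggests.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

public final class AttributeUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Sent to the server during connection establishment (see the @param javadoc above).
    Map<String, byte[]> connAttrs = new HashMap<>();
    connAttrs.put("client-app", Bytes.toBytes("billing-service"));
    try (Connection conn =
      ConnectionFactory.createConnection(conf, null, User.getCurrent(), connAttrs)) {
      // Attached to every request issued through this Table instance.
      Table table = conn.getTableBuilder(TableName.valueOf("t1"), null)
        .setRequestAttribute("request-id", Bytes.toBytes("req-42")).build();
      try {
        table.get(new Get(Bytes.toBytes("row1")));
      } finally {
        table.close();
      }
    }
  }
}

Per the javadoc added in this change, connection attributes travel once at connection setup, while request attributes accompany each call made through the built Table or AsyncTable.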
+ * + * @param conf configuration + * @param user the user the connection is for + * @param pool the thread pool to use for batch operations + * @param connectionAttributes attributes to be sent along to server during connection establish + * @return Connection object for conf + */ + public static Connection createConnection(Configuration conf, ExecutorService pool, + final User user, Map connectionAttributes) throws IOException { Class clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionOverAsyncConnection.class, Connection.class); if (clazz != ConnectionOverAsyncConnection.class) { try { // Default HCM#HCI is not accessible; make it so before invoking. - Constructor constructor = - clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); + Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, + ExecutorService.class, User.class, Map.class); constructor.setAccessible(true); - return user.runAs((PrivilegedExceptionAction< - Connection>) () -> (Connection) constructor.newInstance(conf, pool, user)); + return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor + .newInstance(conf, pool, user, connectionAttributes)); } catch (Exception e) { throw new IOException(e); } } else { - return FutureUtils.get(createAsyncConnection(conf, user)).toConnection(); + return FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes)) + .toConnection(); } } @@ -281,6 +315,27 @@ public static CompletableFuture createAsyncConnection(Configura */ public static CompletableFuture createAsyncConnection(Configuration conf, final User user) { + return createAsyncConnection(conf, user, null); + } + + /** + * Create a new AsyncConnection instance using the passed {@code conf} and {@code user}. + * AsyncConnection encapsulates all housekeeping for a connection to the cluster. All tables and + * interfaces created from returned connection share zookeeper connection, meta cache, and + * connections to region servers and masters. + *

+ * The caller is responsible for calling {@link AsyncConnection#close()} on the returned + * connection instance. + *

+ * Usually you should only create one AsyncConnection instance in your code and use it everywhere + * as it is thread safe. + * @param conf configuration + * @param user the user the asynchronous connection is for + * @param connectionAttributes attributes to be sent along to server during connection establish + * @return AsyncConnection object wrapped by CompletableFuture + */ + public static CompletableFuture createAsyncConnection(Configuration conf, + final User user, Map connectionAttributes) { return TraceUtil.tracedFuture(() -> { CompletableFuture future = new CompletableFuture<>(); ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf); @@ -300,7 +355,7 @@ public static CompletableFuture createAsyncConnection(Configura try { future.complete( user.runAs((PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, null, user))); + .newInstance(clazz, conf, registry, clusterId, null, user, connectionAttributes))); } catch (Exception e) { registry.close(); future.completeExceptionally(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index 7a7b38a4df6a..51368fc23c15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -189,12 +189,13 @@ public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) { public Table build() { IOExceptionSupplier poolSupplier = pool != null ? () -> pool : ConnectionOverAsyncConnection.this::getBatchPool; - return new TableOverAsyncTable(conn, + AsyncTableBuilder tableBuilder = conn.getTableBuilder(tableName).setRpcTimeout(rpcTimeout, TimeUnit.MILLISECONDS) .setReadRpcTimeout(readRpcTimeout, TimeUnit.MILLISECONDS) .setWriteRpcTimeout(writeRpcTimeout, TimeUnit.MILLISECONDS) - .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS).build(), - poolSupplier); + .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS); + requestAttributes.forEach(tableBuilder::setRequestAttribute); + return new TableOverAsyncTable(conn, tableBuilder.build(), poolSupplier); } }; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index ff75c0725ce5..342cf89acf1a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -34,6 +34,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -119,6 +120,8 @@ class RawAsyncTableImpl implements AsyncTable { private final int startLogErrorsCnt; + private final Map requestAttributes; + RawAsyncTableImpl(AsyncConnectionImpl conn, Timer retryTimer, AsyncTableBuilderBase builder) { this.conn = conn; this.retryTimer = retryTimer; @@ -145,6 +148,7 @@ class RawAsyncTableImpl implements AsyncTable { ? 
conn.connConf.getMetaScannerCaching() : conn.connConf.getScannerCaching(); this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize(); + this.requestAttributes = builder.requestAttributes; } @Override @@ -210,7 +214,8 @@ private SingleRequestCallerBuilder newCaller(byte[] row, int priority, lo .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); + .maxAttempts(maxAttempts).setRequestAttributes(requestAttributes) + .startLogErrorsCnt(startLogErrorsCnt).setRequestAttributes(requestAttributes); } private SingleRequestCallerBuilder @@ -608,7 +613,7 @@ private Scan setDefaultScanConfig(Scan scan) { public void scan(Scan scan, AdvancedScanResultConsumer consumer) { new AsyncClientScanner(setDefaultScanConfig(scan), consumer, tableName, conn, retryTimer, pauseNs, pauseNsForServerOverloaded, maxAttempts, scanTimeoutNs, readRpcTimeoutNs, - startLogErrorsCnt).start(); + startLogErrorsCnt, requestAttributes).start(); } private long resultSize2CacheSize(long maxResultSize) { @@ -704,7 +709,8 @@ private List> batch(List actions, long r .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).call(); + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .setRequestAttributes(requestAttributes).call(); } @Override @@ -732,6 +738,11 @@ public long getScanTimeout(TimeUnit unit) { return unit.convert(scanTimeoutNs, TimeUnit.NANOSECONDS); } + @Override + public Map getRequestAttributes() { + return requestAttributes; + } + private CompletableFuture coprocessorService(Function stubMaker, ServiceCaller callable, RegionInfo region, byte[] row) { RegionCoprocessorRpcChannelImpl channel = new RegionCoprocessorRpcChannelImpl(conn, tableName, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index 7feefc831ca0..3941c0d18540 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -751,4 +751,12 @@ default long getWriteRpcTimeout(TimeUnit unit) { default long getOperationTimeout(TimeUnit unit) { throw new NotImplementedException("Add an implementation!"); } + + /** + * Get the attributes to be submitted with requests + * @return map of request attributes + */ + default Map getRequestAttributes() { + throw new NotImplementedException("Add an implementation!"); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java index 75e16e89a5de..eee985555b34 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java @@ -55,6 +55,11 @@ public interface TableBuilder { */ TableBuilder setWriteRpcTimeout(int timeout); + /** + * Set a request attribute + */ + TableBuilder setRequestAttribute(String key, byte[] value); + /** * Create the {@link Table} instance. 
*/ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java index c74340259f3f..dc3111b0c79d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -36,6 +39,8 @@ abstract class TableBuilderBase implements TableBuilder { protected int writeRpcTimeout; + protected Map requestAttributes = Collections.emptyMap(); + TableBuilderBase(TableName tableName, ConnectionConfiguration connConf) { if (tableName == null) { throw new IllegalArgumentException("Given table name is null"); @@ -73,4 +78,13 @@ public TableBuilderBase setWriteRpcTimeout(int timeout) { this.writeRpcTimeout = timeout; return this; } + + @Override + public TableBuilderBase setRequestAttribute(String key, byte[] value) { + if (this.requestAttributes.isEmpty()) { + this.requestAttributes = new HashMap<>(); + } + this.requestAttributes.put(key, value); + return this; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java index e1565f18159a..0a7dabd476ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java @@ -560,6 +560,11 @@ public long getOperationTimeout(TimeUnit unit) { return table.getOperationTimeout(unit); } + @Override + public Map getRequestAttributes() { + return table.getRequestAttributes(); + } + @Override public RegionLocator getRegionLocator() throws IOException { return conn.toConnection().getRegionLocator(getName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 23d14c272d2b..5e42558671b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.net.SocketAddress; import java.util.Collection; +import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -106,6 +107,7 @@ public abstract class AbstractRpcClient implements RpcC private boolean running = true; // if client runs protected final Configuration conf; + protected final Map connectionAttributes; protected final String clusterId; protected final SocketAddress localAddr; protected final MetricsConnection metrics; @@ -154,7 +156,7 @@ public AtomicInteger load(Address key) throws Exception { * @param metrics the connection metrics */ public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { + MetricsConnection metrics, Map connectionAttributes) { this.userProvider = UserProvider.instantiate(conf); this.localAddr = localAddr; this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); @@ -167,6 +169,7 @@ public AbstractRpcClient(Configuration conf, String 
clusterId, SocketAddress loc this.minIdleTimeBeforeClose = conf.getInt(IDLE_TIME, 120000); // 2 minutes this.conf = conf; + this.connectionAttributes = connectionAttributes; this.codec = getCodec(); this.compressor = getCompressor(conf); this.fallbackAllowed = conf.getBoolean(IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, @@ -416,23 +419,24 @@ private Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcCon } final AtomicInteger counter = concurrentCounterCache.getUnchecked(addr); - Call call = new Call(nextCallId(), md, param, hrc.cellScanner(), returnType, - hrc.getCallTimeout(), hrc.getPriority(), new RpcCallback() { - @Override - public void run(Call call) { - try (Scope scope = call.span.makeCurrent()) { - counter.decrementAndGet(); - onCallFinished(call, hrc, addr, callback); - } finally { - if (hrc.failed()) { - TraceUtil.setError(span, hrc.getFailed()); - } else { - span.setStatus(StatusCode.OK); + Call call = + new Call(nextCallId(), md, param, hrc.cellScanner(), returnType, hrc.getCallTimeout(), + hrc.getPriority(), hrc.getRequestAttributes(), new RpcCallback() { + @Override + public void run(Call call) { + try (Scope scope = call.span.makeCurrent()) { + counter.decrementAndGet(); + onCallFinished(call, hrc, addr, callback); + } finally { + if (hrc.failed()) { + TraceUtil.setError(span, hrc.getFailed()); + } else { + span.setStatus(StatusCode.OK); + } + span.end(); } - span.end(); } - } - }, cs); + }, cs); ConnectionId remoteId = new ConnectionId(ticket, md.getService().getName(), addr); int count = counter.incrementAndGet(); try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java index 7fffdad935fc..3da00c5395d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.net.SocketAddress; +import java.util.Collections; +import java.util.Map; import javax.net.SocketFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -41,7 +43,7 @@ public class BlockingRpcClient extends AbstractRpcClient * SocketFactory */ BlockingRpcClient(Configuration conf) { - this(conf, HConstants.CLUSTER_ID_DEFAULT, null, null); + this(conf, HConstants.CLUSTER_ID_DEFAULT, null, null, Collections.emptyMap()); } /** @@ -53,8 +55,8 @@ public class BlockingRpcClient extends AbstractRpcClient * @param metrics the connection metrics */ public BlockingRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { - super(conf, clusterId, localAddr, metrics); + MetricsConnection metrics, Map connectionAttributes) { + super(conf, clusterId, localAddr, metrics, connectionAttributes); this.socketFactory = NetUtils.getDefaultSocketFactory(conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index d63d14940e78..81ad4d2f056d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -219,7 +219,7 @@ public void cleanup(IOException e) { BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, 
AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics); + rpcClient.metrics, rpcClient.connectionAttributes); this.rpcClient = rpcClient; this.connectionHeaderPreamble = getConnectionHeaderPreamble(); ConnectionHeader header = getConnectionHeader(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index 3c0e24e57145..669fc73a3bfa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -19,6 +19,7 @@ import io.opentelemetry.api.trace.Span; import java.io.IOException; +import java.util.Map; import java.util.Optional; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; @@ -56,14 +57,15 @@ class Call { final Descriptors.MethodDescriptor md; final int timeout; // timeout in millisecond for this call; 0 means infinite. final int priority; + final Map attributes; final MetricsConnection.CallStats callStats; private final RpcCallback callback; final Span span; Timeout timeoutTask; Call(int id, final Descriptors.MethodDescriptor md, Message param, final CellScanner cells, - final Message responseDefaultType, int timeout, int priority, RpcCallback callback, - MetricsConnection.CallStats callStats) { + final Message responseDefaultType, int timeout, int priority, Map attributes, + RpcCallback callback, MetricsConnection.CallStats callStats) { this.param = param; this.md = md; this.cells = cells; @@ -73,6 +75,7 @@ class Call { this.id = id; this.timeout = timeout; this.priority = priority; + this.attributes = attributes; this.callback = callback; this.span = Span.current(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java index 9bee88d599f7..c752f4c18355 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import java.util.Map; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -112,6 +113,16 @@ public boolean hasCallTimeout() { return delegate.hasCallTimeout(); } + @Override + public Map getRequestAttributes() { + return delegate.getRequestAttributes(); + } + + @Override + public void setRequestAttributes(Map requestAttributes) { + delegate.setRequestAttributes(requestAttributes); + } + @Override public void setFailed(IOException e) { delegate.setFailed(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index c60de7658f3d..cd303a5eda77 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import java.util.Map; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ 
-71,6 +72,16 @@ public interface HBaseRpcController extends RpcController, CellScannable { boolean hasCallTimeout(); + /** + * Get the map of request attributes + */ + Map getRequestAttributes(); + + /** + * Set the map of request attributes + */ + void setRequestAttributes(Map requestAttributes); + /** * Set failed with an exception to pass on. For use in async rpc clients * @param e exception to set with diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java index 99ed5c4d48b6..425c5e77afcd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java @@ -19,7 +19,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; @@ -70,6 +72,8 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { */ private CellScanner cellScanner; + private Map requestAttributes = Collections.emptyMap(); + public HBaseRpcControllerImpl() { this(null, (CellScanner) null); } @@ -166,6 +170,16 @@ public boolean hasCallTimeout() { return callTimeout != null; } + @Override + public Map getRequestAttributes() { + return requestAttributes; + } + + @Override + public void setRequestAttributes(Map requestAttributes) { + this.requestAttributes = requestAttributes; + } + @Override public synchronized String errorText() { if (!done || exception == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index b509dcbd27b7..d6df6c974ccf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -25,6 +25,7 @@ import java.net.ConnectException; import java.net.SocketTimeoutException; import java.nio.channels.ClosedChannelException; +import java.util.Map; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -44,10 +45,12 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocal; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; @@ -126,6 +129,14 @@ static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) if (call.priority != HConstants.PRIORITY_UNSET) { builder.setPriority(call.priority); } + if (call.attributes != null && !call.attributes.isEmpty()) { + HBaseProtos.NameBytesPair.Builder attributeBuilder = HBaseProtos.NameBytesPair.newBuilder(); + 
for (Map.Entry attribute : call.attributes.entrySet()) { + attributeBuilder.setName(attribute.getKey()); + attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); + builder.addAttribute(attributeBuilder.build()); + } + } builder.setTimeout(call.timeout); return builder.build(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java index 231caa40a89e..ed0c4fffc724 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.net.SocketAddress; +import java.util.Collections; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -55,7 +57,12 @@ public class NettyRpcClient extends AbstractRpcClient { public NettyRpcClient(Configuration configuration, String clusterId, SocketAddress localAddress, MetricsConnection metrics) { - super(configuration, clusterId, localAddress, metrics); + this(configuration, clusterId, localAddress, metrics, Collections.emptyMap()); + } + + public NettyRpcClient(Configuration configuration, String clusterId, SocketAddress localAddress, + MetricsConnection metrics, Map connectionAttributes) { + super(configuration, clusterId, localAddress, metrics, connectionAttributes); Pair> groupAndChannelClass = NettyRpcClientConfigHelper.getEventLoopConfig(conf); if (groupAndChannelClass == null) { @@ -75,7 +82,7 @@ public NettyRpcClient(Configuration configuration, String clusterId, SocketAddre /** Used in test only. */ public NettyRpcClient(Configuration configuration) { - this(configuration, HConstants.CLUSTER_ID_DEFAULT, null, null); + this(configuration, HConstants.CLUSTER_ID_DEFAULT, null, null, Collections.emptyMap()); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 48104038c217..3f9a58d51263 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -104,7 +104,7 @@ class NettyRpcConnection extends RpcConnection { NettyRpcConnection(NettyRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics); + rpcClient.metrics, rpcClient.connectionAttributes); this.rpcClient = rpcClient; this.eventLoop = rpcClient.group.next(); byte[] connectionHeaderPreamble = getConnectionHeaderPreamble(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java index 9b69b5234050..f1df572675c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.ipc; import java.net.SocketAddress; +import java.util.Collections; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.MetricsConnection; import 
org.apache.hadoop.hbase.util.ReflectionUtils; @@ -59,7 +61,7 @@ public static RpcClient createClient(Configuration conf, String clusterId) { */ public static RpcClient createClient(Configuration conf, String clusterId, MetricsConnection metrics) { - return createClient(conf, clusterId, null, metrics); + return createClient(conf, clusterId, null, metrics, Collections.emptyMap()); } private static String getRpcClientClass(Configuration conf) { @@ -81,10 +83,11 @@ private static String getRpcClientClass(Configuration conf) { * @return newly created RpcClient */ public static RpcClient createClient(Configuration conf, String clusterId, - SocketAddress localAddr, MetricsConnection metrics) { + SocketAddress localAddr, MetricsConnection metrics, Map connectionAttributes) { String rpcClientClass = getRpcClientClass(conf); - return ReflectionUtils.instantiateWithCustomCtor(rpcClientClass, new Class[] { - Configuration.class, String.class, SocketAddress.class, MetricsConnection.class }, - new Object[] { conf, clusterId, localAddr, metrics }); + return ReflectionUtils.instantiateWithCustomCtor( + rpcClientClass, new Class[] { Configuration.class, String.class, SocketAddress.class, + MetricsConnection.class, Map.class }, + new Object[] { conf, clusterId, localAddr, metrics, connectionAttributes }); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 912fa4fb0654..31698a1a1e8e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -39,11 +40,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.TimerTask; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @@ -70,6 +73,7 @@ abstract class RpcConnection { protected final CompressionCodec compressor; protected final MetricsConnection metrics; + private final Map connectionAttributes; protected final HashedWheelTimer timeoutTimer; @@ -86,12 +90,13 @@ abstract class RpcConnection { protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, ConnectionId remoteId, String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, - MetricsConnection metrics) throws IOException { + MetricsConnection metrics, Map connectionAttributes) throws IOException { this.timeoutTimer = timeoutTimer; this.codec = codec; this.compressor = compressor; this.conf = conf; this.metrics = metrics; + this.connectionAttributes = connectionAttributes; User ticket = remoteId.getTicket(); this.securityInfo = SecurityInfo.getInfo(remoteId.getServiceName()); this.useSasl = isSecurityEnabled; @@ -169,6 +174,14 @@ protected final ConnectionHeader 
getConnectionHeader() { if (this.compressor != null) { builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName()); } + if (connectionAttributes != null && !connectionAttributes.isEmpty()) { + HBaseProtos.NameBytesPair.Builder attributeBuilder = HBaseProtos.NameBytesPair.newBuilder(); + for (Map.Entry attribute : connectionAttributes.entrySet()) { + attributeBuilder.setName(attribute.getKey()); + attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); + builder.addAttribute(attributeBuilder.build()); + } + } builder.setVersionInfo(ProtobufUtil.getVersionInfo()); boolean isCryptoAESEnable = conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); // if Crypto AES enable, setup Cipher transformation diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index 6c97c19f96cc..54b351f00a3b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.SocketAddress; import java.util.Collections; +import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -95,7 +96,7 @@ public class TestRpcBasedRegistryHedgedReads { public static final class RpcClientImpl implements RpcClient { public RpcClientImpl(Configuration configuration, String clusterId, SocketAddress localAddress, - MetricsConnection metrics) { + MetricsConnection metrics, Map attributes) { } @Override diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java index 7375388e4a04..10948358ff92 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.net.ServerSocket; import java.net.Socket; +import java.util.Collections; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; @@ -148,7 +149,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { Address.fromParts("127.0.0.1", server.getLocalPort())); NettyRpcConnection conn = client.createConnection(id); BlockingRpcCallback done = new BlockingRpcCallback<>(); - Call call = new Call(1, null, null, null, null, 0, 0, done, new CallStats()); + Call call = + new Call(1, null, null, null, null, 0, 0, Collections.emptyMap(), done, new CallStats()); HBaseRpcController hrc = new HBaseRpcControllerImpl(); conn.sendRequest(call, hrc); done.get(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 51e9e1e7755f..fc7f66129d35 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1667,9 +1667,10 @@ private static class ConfigurationCaptorConnection implements Connection { private final Connection delegate; - public 
ConfigurationCaptorConnection(Configuration conf, ExecutorService es, User user) - throws IOException { - delegate = FutureUtils.get(createAsyncConnection(conf, user)).toConnection(); + public ConfigurationCaptorConnection(Configuration conf, ExecutorService es, User user, + Map connectionAttributes) throws IOException { + delegate = + FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes)).toConnection(); final String uuid = conf.get(UUID_KEY); if (uuid != null) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 7d099aa44e24..0c879bd5ace3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -123,7 +123,8 @@ public static class MRSplitsConnection implements Connection { private final Configuration configuration; static final AtomicInteger creations = new AtomicInteger(0); - MRSplitsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { + MRSplitsConnection(Configuration conf, ExecutorService pool, User user, + Map connectionAttributes) throws IOException { this.configuration = conf; creations.incrementAndGet(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 13e3831f6df6..f41282b8f4f8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -212,8 +212,8 @@ private static class ConnectionForMergeTesting implements Connection { SIZE_MAP.put(Bytes.toBytes("p"), 200L * 1024L * 1024L); } - ConnectionForMergeTesting(Configuration conf, ExecutorService pool, User user) - throws IOException { + ConnectionForMergeTesting(Configuration conf, ExecutorService pool, User user, + Map connectionAttributes) throws IOException { } @Override diff --git a/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto b/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto index 6426f0cb06cb..e992e681fbff 100644 --- a/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto +++ b/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto @@ -92,6 +92,7 @@ message ConnectionHeader { optional VersionInfo version_info = 5; // the transformation for rpc AES encryption with Apache Commons Crypto optional string rpc_crypto_cipher_transformation = 6; + repeated NameBytesPair attribute = 7; } // This is sent by rpc server to negotiate the data if necessary @@ -148,6 +149,7 @@ message RequestHeader { // See HConstants. 
optional uint32 priority = 6; optional uint32 timeout = 7; + repeated NameBytesPair attribute = 8; } message ResponseHeader { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java index 1dda6c32ca04..e2c11ab1d5e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import java.net.SocketAddress; +import java.util.Collections; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; @@ -59,7 +60,7 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, SocketAddress localAddress, User user) { - super(conf, registry, clusterId, localAddress, user); + super(conf, registry, clusterId, localAddress, user, Collections.emptyMap()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java index 02718145c9b7..e2b45fe30c3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; +import java.util.Collections; import java.util.List; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -54,7 +55,8 @@ public AsyncRegionReplicationRetryingCaller(HashedWheelTimer retryTimer, RegionInfo replica, List entries) { super(retryTimer, conn, ConnectionUtils.getPriority(replica.getTable()), conn.connConf.getPauseNs(), conn.connConf.getPauseNsForServerOverloaded(), maxAttempts, - operationTimeoutNs, rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt()); + operationTimeoutNs, rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt(), + Collections.emptyMap()); this.replica = replica; this.entries = entries.toArray(new Entry[0]); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 197ddb71d7e6..cc97a39c7ee4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -27,6 +27,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -82,6 +83,8 @@ public interface RpcCall extends RpcCallContext { /** Returns The request header of this call. 
*/ RequestHeader getHeader(); + ConnectionHeader getConnectionHeader(); + /** Returns Port of remote address in this call */ int getRemotePort(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 2188795914db..f3568a36f144 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; @@ -207,6 +208,11 @@ public RequestHeader getHeader() { return this.header; } + @Override + public RPCProtos.ConnectionHeader getConnectionHeader() { + return this.connection.connectionHeader; + } + @Override public int getPriority() { return this.header.getPriority(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java index a87babad0d27..45e59def7216 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; @@ -77,6 +78,11 @@ public long getScanTimeout(TimeUnit unit) { return 0; } + @Override + public Map getRequestAttributes() { + return null; + } + @Override public CompletableFuture get(Get get) { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index 65def75fff1b..d358695c5f9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -22,6 +22,7 @@ import java.net.SocketAddress; import java.net.SocketTimeoutException; +import java.util.Map; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; @@ -129,8 +130,8 @@ public void testAdminTimeout() throws Exception { */ public static class RandomTimeoutRpcClient extends BlockingRpcClient { public RandomTimeoutRpcClient(Configuration conf, String clusterId, SocketAddress localAddr, - MetricsConnection metrics) { - super(conf, clusterId, localAddr, metrics); + MetricsConnection metrics, Map connectionAttributes) { + super(conf, clusterId, localAddr, metrics, connectionAttributes); } // Return my own instance, one that does random timeouts diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java new file mode 100644 index 000000000000..b376bfc18557 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; + +@Category({ ClientTests.class, MediumTests.class }) +public class TestRequestAndConnectionAttributes { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRequestAndConnectionAttributes.class); + + private static final Map CONNECTION_ATTRIBUTES = new HashMap<>(); + static { + CONNECTION_ATTRIBUTES.put("clientId", Bytes.toBytes("foo")); + } + private static final Map REQUEST_ATTRIBUTES = new HashMap<>(); + private static final ExecutorService EXECUTOR_SERVICE = Executors.newFixedThreadPool(100); + private static final AtomicBoolean REQUEST_ATTRIBUTES_VALIDATED = new AtomicBoolean(false); + private static final byte[] REQUEST_ATTRIBUTES_TEST_TABLE_CF = Bytes.toBytes("0"); + private static final TableName 
REQUEST_ATTRIBUTES_TEST_TABLE = + TableName.valueOf("testRequestAttributes"); + + private static HBaseTestingUtil TEST_UTIL = null; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtil(); + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.createTable(REQUEST_ATTRIBUTES_TEST_TABLE, + new byte[][] { REQUEST_ATTRIBUTES_TEST_TABLE_CF }, 1, HConstants.DEFAULT_BLOCKSIZE, + AttributesCoprocessor.class.getName()); + } + + @AfterClass + public static void afterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() { + REQUEST_ATTRIBUTES_VALIDATED.getAndSet(false); + } + + @Test + public void testConnectionAttributes() throws IOException { + TableName tableName = TableName.valueOf("testConnectionAttributes"); + TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("0") }, 1, + HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName()); + + Configuration conf = TEST_UTIL.getConfiguration(); + try (Connection conn = ConnectionFactory.createConnection(conf, null, + AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES); Table table = conn.getTable(tableName)) { + Result result = table.get(new Get(Bytes.toBytes(0))); + assertEquals(CONNECTION_ATTRIBUTES.size(), result.size()); + for (Map.Entry attr : CONNECTION_ATTRIBUTES.entrySet()) { + byte[] val = result.getValue(Bytes.toBytes("c"), Bytes.toBytes(attr.getKey())); + assertEquals(Bytes.toStringBinary(attr.getValue()), Bytes.toStringBinary(val)); + } + } + } + + @Test + public void testRequestAttributesGet() throws IOException { + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + + table.get(new Get(Bytes.toBytes(0))); + } + + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testRequestAttributesMultiGet() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + List gets = ImmutableList.of(new Get(Bytes.toBytes(0)), new Get(Bytes.toBytes(1))); + table.get(gets); + } + + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testRequestAttributesExists() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + + table.exists(new Get(Bytes.toBytes(0))); + } + + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testRequestAttributesScan() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, 
AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + ResultScanner scanner = table.getScanner(new Scan()); + scanner.next(); + } + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testRequestAttributesPut() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + Put put = new Put(Bytes.toBytes("a")); + put.addColumn(REQUEST_ATTRIBUTES_TEST_TABLE_CF, Bytes.toBytes("c"), Bytes.toBytes("v")); + table.put(put); + } + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testRequestAttributesMultiPut() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + addRandomRequestAttributes(); + + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes( + conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { + Put put = new Put(Bytes.toBytes("a")); + put.addColumn(REQUEST_ATTRIBUTES_TEST_TABLE_CF, Bytes.toBytes("c"), Bytes.toBytes("v")); + table.put(put); + } + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + + @Test + public void testNoRequestAttributes() throws IOException { + assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); + TableName tableName = TableName.valueOf("testNoRequestAttributesScan"); + TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("0") }, 1, + HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName()); + + REQUEST_ATTRIBUTES.clear(); + Configuration conf = TEST_UTIL.getConfiguration(); + try (Connection conn = ConnectionFactory.createConnection(conf, null, + AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES)) { + TableBuilder tableBuilder = conn.getTableBuilder(tableName, null); + try (Table table = tableBuilder.build()) { + table.get(new Get(Bytes.toBytes(0))); + assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); + } + } + } + + private void addRandomRequestAttributes() { + REQUEST_ATTRIBUTES.clear(); + int j = Math.max(2, (int) (10 * Math.random())); + for (int i = 0; i < j; i++) { + REQUEST_ATTRIBUTES.put(String.valueOf(i), Bytes.toBytes(UUID.randomUUID().toString())); + } + } + + private static TableBuilder configureRequestAttributes(TableBuilder tableBuilder) { + REQUEST_ATTRIBUTES.forEach(tableBuilder::setRequestAttribute); + return tableBuilder; + } + + public static class AttributesCoprocessor implements RegionObserver, RegionCoprocessor { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preGetOp(ObserverContext c, Get get, + List result) throws IOException { + validateRequestAttributes(); + + // for connection attrs test + RpcCall rpcCall = RpcServer.getCurrentCall().get(); + for (HBaseProtos.NameBytesPair attr : rpcCall.getHeader().getAttributeList()) { + result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) + .setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getName())) + 
.setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build()); + } + for (HBaseProtos.NameBytesPair attr : rpcCall.getConnectionHeader().getAttributeList()) { + result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) + .setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getName())) + .setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build()); + } + result.sort(CellComparator.getInstance()); + c.bypass(); + } + + @Override + public boolean preScannerNext(ObserverContext c, + InternalScanner s, List result, int limit, boolean hasNext) throws IOException { + validateRequestAttributes(); + return hasNext; + } + + @Override + public void prePut(ObserverContext c, Put put, WALEdit edit) + throws IOException { + validateRequestAttributes(); + } + + private void validateRequestAttributes() { + RpcCall rpcCall = RpcServer.getCurrentCall().get(); + List attrs = rpcCall.getHeader().getAttributeList(); + if (attrs.size() != REQUEST_ATTRIBUTES.size()) { + return; + } + for (HBaseProtos.NameBytesPair attr : attrs) { + if (!REQUEST_ATTRIBUTES.containsKey(attr.getName())) { + return; + } + if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getName()), attr.getValue().toByteArray())) { + return; + } + } + REQUEST_ATTRIBUTES_VALIDATED.getAndSet(true); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java index f36fef186f08..feaf44e0b84e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.Socket; import java.net.SocketAddress; +import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import org.apache.hadoop.conf.Configuration; @@ -71,8 +72,8 @@ public MyRpcClientImpl(Configuration conf) { } public MyRpcClientImpl(Configuration conf, String clusterId, SocketAddress address, - MetricsConnection metrics) { - super(conf, clusterId, address, metrics); + MetricsConnection metrics, Map connectionAttributes) { + super(conf, clusterId, address, metrics, connectionAttributes); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java index e14b710647d1..80b3845d6688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java @@ -124,7 +124,7 @@ public void test() throws IOException, InterruptedException { int callId = 10; Call call = new Call(callId, TestProtobufRpcProto.getDescriptor().findMethodByName("ping"), EmptyRequestProto.getDefaultInstance(), null, EmptyResponseProto.getDefaultInstance(), 1000, - HConstants.NORMAL_QOS, null, MetricsConnection.newCallStats()); + HConstants.NORMAL_QOS, null, null, MetricsConnection.newCallStats()); RequestHeader requestHeader = IPCUtil.buildRequestHeader(call, null); dos.writeInt(IPCUtil.getTotalSizeWhenWrittenDelimited(requestHeader, call.param)); requestHeader.writeDelimitedTo(dos); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 909e7fdb7f3d..7a3ca0b7cf9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -632,6 +632,7 @@ private static RpcCall getRpcCall(String userName, int forcedParamIndex) { return getRpcCall(userName, Optional.of(forcedParamIndex)); } + @SuppressWarnings("checkstyle:methodlength") private static RpcCall getRpcCall(String userName, Optional forcedParamIndex) { RpcCall rpcCall = new RpcCall() { @Override @@ -666,7 +667,6 @@ public long getStartTime() { @Override public void setStartTime(long startTime) { - } @Override @@ -694,6 +694,11 @@ public RPCProtos.RequestHeader getHeader() { return null; } + @Override + public RPCProtos.ConnectionHeader getConnectionHeader() { + return null; + } + @Override public int getRemotePort() { return 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index d26870b77dfd..dd49d00ac3a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -158,6 +158,7 @@ public void testInsertWithRpcCall() throws Exception { RpcServer.setCurrentCall(null); } + @SuppressWarnings("checkstyle:methodlength") private RpcCall newRpcCallWithDeadline() { return new RpcCall() { @Override @@ -220,6 +221,11 @@ public RPCProtos.RequestHeader getHeader() { return null; } + @Override + public RPCProtos.ConnectionHeader getConnectionHeader() { + return null; + } + @Override public int getRemotePort() { return 0; diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java index 250b8a74f030..db1b1e1c9870 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java @@ -89,8 +89,8 @@ public class ThriftConnection implements Connection { private int operationTimeout; private int connectTimeout; - public ThriftConnection(Configuration conf, ExecutorService pool, final User user) - throws IOException { + public ThriftConnection(Configuration conf, ExecutorService pool, final User user, + Map connectionAttributes) throws IOException { this.conf = conf; this.user = user; this.host = conf.get(Constants.HBASE_THRIFT_SERVER_NAME); @@ -322,6 +322,11 @@ public TableBuilder setWriteRpcTimeout(int timeout) { return this; } + @Override + public TableBuilder setRequestAttribute(String key, byte[] value) { + return this; + } + @Override public Table build() { try { From cf81fd393e130d127bda09995e8d9432733de307 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Thu, 27 Jul 2023 09:41:29 +0800 Subject: [PATCH 032/514] =?UTF-8?q?HBASE-27805=20The=20chunk=20created=20b?= =?UTF-8?q?y=20mslab=20may=20cause=20memory=20fragement=20and=E2=80=A6=20(?= =?UTF-8?q?#5193)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-27805 The chunk created by mslab may cause memory fragement and lead to fullgc --- 
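The troubleshooting entry added below recommends setting hbase.hregion.memstore.mslab.chunksize to 2096128 bytes (2047KB). The reasoning: with a 4MB G1 region, a 2MB chunk is exactly half a region and is therefore allocated as a humongous object, while 2096128 bytes stays just under that threshold. A minimal sketch of setting it programmatically; in practice the value normally goes into hbase-site.xml on the region servers:

----
// 2096128 bytes = 2047 KB, just under half of a 4 MB G1 region, so MSLAB
// chunks are no longer treated as humongous allocations.
Configuration conf = HBaseConfiguration.create();
conf.setInt("hbase.hregion.memstore.mslab.chunksize", 2096128);
----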
.../asciidoc/_chapters/troubleshooting.adoc | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc b/src/main/asciidoc/_chapters/troubleshooting.adoc index eb340bf31df8..411b9b8ef6cd 100644 --- a/src/main/asciidoc/_chapters/troubleshooting.adoc +++ b/src/main/asciidoc/_chapters/troubleshooting.adoc @@ -1566,3 +1566,25 @@ then check if you compiled with jdk8 and tried to run it on jdk7. If so, this won't work. Run on jdk8 or recompile with jdk7. See link:https://issues.apache.org/jira/browse/HBASE-10607[HBASE-10607 JDK8 NoSuchMethodError involving ConcurrentHashMap.keySet if running on JRE 7]. + +=== Full GC caused by MSLAB when using G1 + +The default chunk size used by MSLAB is 2MB. When using G1, if the heap region size is 4MB, each chunk is allocated as a humongous object that exclusively occupies one region, so the remaining 2MB of that region is left as a memory fragment. + +A large amount of such fragmentation can trigger a full GC even when the percentage of used heap is not particularly high. + +The G1HeapRegionSize is calculated from initial_heap_size and max_heap_size; here are some cases to illustrate: + +- xmx=10G -> region size 2M +- xms=10G, xmx=10G -> region size 4M +- xmx=20G -> region size 4M +- xms=20G, xmx=20G -> region size 8M +- xmx=30G -> region size 4M +- xmx=32G -> region size 8M + +You can avoid this problem by reducing the chunk size slightly, to 2047KB, as shown below. + +---- +hbase.hregion.memstore.mslab.chunksize 2096128 +---- + From 67b20fdd9dee8c244e3ddb05e992031f664fc747 Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Fri, 28 Jul 2023 23:18:07 +0800 Subject: [PATCH 033/514] HBASE-27988 NPE in AddPeerProcedure recovery (#5331) Co-authored-by: huiruan <876107431@qq.com> Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/master/HMaster.java | 9 +++++++++ .../apache/hadoop/hbase/master/MasterServices.java | 6 ++++++ .../hbase/master/replication/AddPeerProcedure.java | 6 +++--- .../AssignReplicationQueuesProcedure.java | 2 +- .../master/replication/ReplicationPeerManager.java | 12 ------------ .../hadoop/hbase/master/MockNoopMasterServices.java | 6 ++++++ 6 files changed, 25 insertions(+), 16 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 1c77e8dfaafa..1b5291491503 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; @@ -368,6 +369,9 @@ public class HMaster extends HBaseServerBase implements Maste private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier = new ReplicationLogCleanerBarrier(); + // Only allow to add one sync replication peer concurrently + private final Semaphore syncReplicationPeerLock = new Semaphore(1); + // manager of replication private ReplicationPeerManager replicationPeerManager; @@ -4115,6 +4119,11 @@ public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { return replicationLogCleanerBarrier; } + @Override + public Semaphore getSyncReplicationPeerLock() { + return syncReplicationPeerLock; + } + public HashMap>>
getReplicationLoad(ServerName[] serverNames) { List peerList = this.getReplicationPeerManager().listPeers(null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index d450fbb45ac0..95166240c789 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.Semaphore; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; @@ -368,6 +369,11 @@ ReplicationPeerConfig getReplicationPeerConfig(String peerId) */ ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier(); + /** + * Returns the SyncReplicationPeerLock. + */ + Semaphore getSyncReplicationPeerLock(); + /** * Returns the {@link SyncReplicationReplayWALManager}. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java index c469896d3e7d..8f8bdd63ea30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java @@ -89,7 +89,7 @@ protected void releaseLatch(MasterProcedureEnv env) { env.getMasterServices().getReplicationLogCleanerBarrier().enable(); } if (peerConfig.isSyncReplication()) { - env.getReplicationPeerManager().releaseSyncReplicationPeerLock(); + env.getMasterServices().getSyncReplicationPeerLock().release(); } super.releaseLatch(env); } @@ -108,7 +108,7 @@ protected void prePeerModification(MasterProcedureEnv env) cpHost.preAddReplicationPeer(peerId, peerConfig); } if (peerConfig.isSyncReplication()) { - if (!env.getReplicationPeerManager().tryAcquireSyncReplicationPeerLock()) { + if (!env.getMasterServices().getSyncReplicationPeerLock().tryAcquire()) { throw suspend(env.getMasterConfiguration(), backoff -> LOG.warn( "Can not acquire sync replication peer lock for peer {}, sleep {} secs", peerId, @@ -147,7 +147,7 @@ protected void afterReplay(MasterProcedureEnv env) { } cleanerDisabled = true; if (peerConfig.isSyncReplication()) { - if (!env.getReplicationPeerManager().tryAcquireSyncReplicationPeerLock()) { + if (!env.getMasterServices().getSyncReplicationPeerLock().tryAcquire()) { throw new IllegalStateException( "Can not acquire sync replication peer lock for peer " + peerId); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AssignReplicationQueuesProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AssignReplicationQueuesProcedure.java index b547c87009dd..298b40d357f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AssignReplicationQueuesProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AssignReplicationQueuesProcedure.java @@ -186,7 +186,7 @@ protected Flow executeFromState(MasterProcedureEnv env, AssignReplicationQueuesS retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); } long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Failed to claim replication queues for {}, suspend {}secs {}; {};", crashedServer, + LOG.warn("Failed to claim 
replication queues for {}, suspend {} secs", crashedServer, backoff / 1000, e); setTimeout(Math.toIntExact(backoff)); setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 53a7a6f00146..988c519f781d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -32,7 +32,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -111,9 +110,6 @@ public class ReplicationPeerManager implements ConfigurationObserver { SyncReplicationState.DOWNGRADE_ACTIVE, EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE))); - // Only allow to add one sync replication peer concurrently - private final Semaphore syncReplicationPeerLock = new Semaphore(1); - private final String clusterId; private volatile Configuration conf; @@ -713,14 +709,6 @@ private boolean isStringEquals(String s1, String s2) { return s1.equals(s2); } - public boolean tryAcquireSyncReplicationPeerLock() { - return syncReplicationPeerLock.tryAcquire(); - } - - public void releaseSyncReplicationPeerLock() { - syncReplicationPeerLock.release(); - } - @Override public void onConfigurationChange(Configuration conf) { this.conf = conf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index d526358ceb4e..c82220a8b22a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.Semaphore; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.ChoreService; @@ -530,4 +531,9 @@ public boolean isReplicationPeerModificationEnabled() { public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { return null; } + + @Override + public Semaphore getSyncReplicationPeerLock() { + return null; + } } From 0bbc8d15c1a9a489b42d85b126b31483cf536c94 Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Fri, 28 Jul 2023 18:40:40 +0200 Subject: [PATCH 034/514] HBASE-27992 Bump exec-maven-plugin to 3.1.0 (#5334) Signed-off-by: Duo Zhang --- hbase-shaded/hbase-shaded-check-invariants/pom.xml | 2 +- hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml | 2 +- pom.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-shaded/hbase-shaded-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-check-invariants/pom.xml index aee04a1d0fe1..324256bb176f 100644 --- a/hbase-shaded/hbase-shaded-check-invariants/pom.xml +++ b/hbase-shaded/hbase-shaded-check-invariants/pom.xml @@ -170,7 +170,7 @@ org.codehaus.mojo exec-maven-plugin - 1.6.0 + ${exec.maven.version} 8.29 - 1.6.0 + 3.1.0 2.16 2.4.2 1.0.0 From f6c5dbe97832d4c7d301bce48792365dd2b79e8a Mon Sep 17 00:00:00 2001 From: Hernan Romer Date: Mon, 31 Jul 
2023 17:50:57 -0400 Subject: [PATCH 035/514] VerifyReplication recompare async (#5051) Signed-off-by: Bryan Beaudreault --- .../replication/VerifyReplication.java | 186 ++++++++++++++---- .../VerifyReplicationRecompareRunnable.java | 162 +++++++++++++++ .../replication/TestVerifyReplication.java | 125 +++++++++++- ...estVerifyReplicationRecompareRunnable.java | 154 +++++++++++++++ 4 files changed, 591 insertions(+), 36 deletions(-) create mode 100644 hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 1e268c1858b8..6e3650297bd3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -19,7 +19,12 @@ import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.UUID; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -30,7 +35,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -46,6 +50,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableSplit; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication.Verifier.Counters; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; @@ -55,12 +60,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -84,6 +89,11 @@ public class VerifyReplication extends Configured implements Tool { public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; + private static ThreadPoolExecutor reCompareExecutor = null; + int reCompareTries = 0; + int reCompareBackoffExponent = 0; + int reCompareThreads = 0; + int sleepMsBeforeReCompare = 0; long startTime = 0; long endTime = Long.MAX_VALUE; int batch = -1; @@ -94,7 +104,6 @@ public class 
VerifyReplication extends Configured implements Tool { String peerId = null; String peerQuorumAddress = null; String rowPrefixes = null; - int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; // Source table snapshot name @@ -124,7 +133,12 @@ public enum Counters { BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, - CONTENT_DIFFERENT_ROWS + CONTENT_DIFFERENT_ROWS, + RECOMPARES, + MAIN_THREAD_RECOMPARES, + SOURCE_ROW_CHANGED, + PEER_ROW_CHANGED, + FAILED_RECOMPARE } private Connection sourceConnection; @@ -133,6 +147,9 @@ public enum Counters { private Table replicatedTable; private ResultScanner replicatedScanner; private Result currentCompareRowInPeerTable; + private Scan tableScan; + private int reCompareTries; + private int reCompareBackoffExponent; private int sleepMsBeforeReCompare; private String delimiter = ""; private boolean verbose = false; @@ -150,7 +167,12 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); + reCompareTries = conf.getInt(NAME + ".recompareTries", 0); + reCompareBackoffExponent = conf.getInt(NAME + ".recompareBackoffExponent", 1); sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); + if (sleepMsBeforeReCompare > 0) { + reCompareTries = Math.max(reCompareTries, 1); + } delimiter = conf.get(NAME + ".delimiter", ""); verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); @@ -179,9 +201,12 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) if (versions >= 0) { scan.readVersions(versions); } + int reCompareThreads = conf.getInt(NAME + ".recompareThreads", 0); + reCompareExecutor = buildReCompareExecutor(reCompareThreads, context); TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); sourceConnection = ConnectionFactory.createConnection(conf); sourceTable = sourceConnection.getTable(tableName); + tableScan = scan; final InputSplit tableSplit = context.getInputSplit(); @@ -226,7 +251,7 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) while (true) { if (currentCompareRowInPeerTable == null) { // reach the region end of peer table, row only in source table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); break; } int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow()); @@ -240,55 +265,77 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { - logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value, + currentCompareRowInPeerTable); } currentCompareRowInPeerTable = replicatedScanner.next(); break; } else if (rowCmpRet < 0) { // row only exists in source table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); break; } else { // row only exists in peer table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, 
currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } } } - private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) { - if (sleepMsBeforeReCompare > 0) { - Threads.sleep(sleepMsBeforeReCompare); - try { - Result sourceResult = sourceTable.get(new Get(row.getRow())); - Result replicatedResult = replicatedTable.get(new Get(row.getRow())); - Result.compareResults(sourceResult, replicatedResult, false); - if (!sourceResult.isEmpty()) { - context.getCounter(Counters.GOODROWS).increment(1); - if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter - + Bytes.toStringBinary(row.getRow()) + delimiter); - } - } - return; - } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter - + Bytes.toStringBinary(row.getRow()) + delimiter); - } + @SuppressWarnings("FutureReturnValueIgnored") + private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row, + Result replicatedRow) { + byte[] rowKey = getRow(row, replicatedRow); + if (reCompareTries == 0) { + context.getCounter(counter).increment(1); + context.getCounter(Counters.BADROWS).increment(1); + LOG.error("{}, rowkey={}{}{}", counter, delimiter, Bytes.toStringBinary(rowKey), delimiter); + return; + } + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + row, replicatedRow, counter, delimiter, tableScan, sourceTable, replicatedTable, + reCompareTries, sleepMsBeforeReCompare, reCompareBackoffExponent, verbose); + + if (reCompareExecutor == null) { + runnable.run(); + return; } - context.getCounter(counter).increment(1); - context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) - + delimiter); + + reCompareExecutor.submit(runnable); } @Override protected void cleanup(Context context) { + if (reCompareExecutor != null && !reCompareExecutor.isShutdown()) { + reCompareExecutor.shutdown(); + try { + boolean terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); + if (!terminated) { + List queue = reCompareExecutor.shutdownNow(); + for (Runnable runnable : queue) { + ((VerifyReplicationRecompareRunnable) runnable).fail(); + } + + terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); + + if (!terminated) { + int activeCount = Math.max(1, reCompareExecutor.getActiveCount()); + LOG.warn("Found {} possible recompares still running in the executable" + + " incrementing BADROWS and FAILED_RECOMPARE", activeCount); + context.getCounter(Counters.BADROWS).increment(activeCount); + context.getCounter(Counters.FAILED_RECOMPARE).increment(activeCount); + } + } + } catch (InterruptedException e) { + throw new RuntimeException("Failed to await executor termination in cleanup", e); + } + } if (replicatedScanner != null) { try { while (currentCompareRowInPeerTable != null) { - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } @@ -424,6 +471,10 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); + conf.setInt(NAME + ".recompareTries", reCompareTries); + conf.setInt(NAME + ".recompareBackoffExponent", reCompareBackoffExponent); + conf.setInt(NAME + ".recompareThreads", 
reCompareThreads); + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -491,6 +542,15 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce return job; } + protected static byte[] getRow(Result sourceResult, Result replicatedResult) { + if (sourceResult != null) { + return sourceResult.getRow(); + } else if (replicatedResult != null) { + return replicatedResult.getRow(); + } + throw new RuntimeException("Both sourceResult and replicatedResult are null!"); + } + private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { if (rowPrefixes != null && !rowPrefixes.isEmpty()) { String[] rowPrefixArray = rowPrefixes.split(","); @@ -575,11 +635,20 @@ public boolean doCommandLine(final String[] args) { continue; } - final String sleepToReCompareKey = "--recomparesleep="; + final String deprecatedSleepToReCompareKey = "--recomparesleep="; + final String sleepToReCompareKey = "--recompareSleep="; + if (cmd.startsWith(deprecatedSleepToReCompareKey)) { + LOG.warn("--recomparesleep is deprecated and will be removed in 4.0.0." + + " Use --recompareSleep instead."); + sleepMsBeforeReCompare = + Integer.parseInt(cmd.substring(deprecatedSleepToReCompareKey.length())); + continue; + } if (cmd.startsWith(sleepToReCompareKey)) { sleepMsBeforeReCompare = Integer.parseInt(cmd.substring(sleepToReCompareKey.length())); continue; } + final String verboseKey = "--verbose"; if (cmd.startsWith(verboseKey)) { verbose = true; @@ -628,6 +697,25 @@ public boolean doCommandLine(final String[] args) { continue; } + final String reCompareThreadArgs = "--recompareThreads="; + if (cmd.startsWith(reCompareThreadArgs)) { + reCompareThreads = Integer.parseInt(cmd.substring(reCompareThreadArgs.length())); + continue; + } + + final String reCompareTriesKey = "--recompareTries="; + if (cmd.startsWith(reCompareTriesKey)) { + reCompareTries = Integer.parseInt(cmd.substring(reCompareTriesKey.length())); + continue; + } + + final String reCompareBackoffExponentKey = "--recompareBackoffExponent="; + if (cmd.startsWith(reCompareBackoffExponentKey)) { + reCompareBackoffExponent = + Integer.parseInt(cmd.substring(reCompareBackoffExponentKey.length())); + continue; + } + if (cmd.startsWith("--")) { printUsage("Invalid argument '" + cmd + "'"); return false; @@ -704,7 +792,8 @@ private static void printUsage(final String errorMsg) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" - + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " + + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recompareSleep=] " + + "[--recompareThreads=] [--recompareTries=] [--recompareBackoffExponent=]" + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); @@ -720,8 +809,14 @@ private static void printUsage(final String errorMsg) { System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + System.err.println(" recompareSleep milliseconds to sleep before recompare row, " + "default value is 0 which disables the recompare."); + 
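As an illustration of the new recompare flags documented in the usage text above and below, here is a hedged sketch of driving the verifyrep job programmatically; the peer cluster key and table name are placeholders, and the option values are examples only:

----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.util.ToolRunner;

public class VerifyRepRecompareExample {
  public static void main(String[] ignored) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Flags mirror those parsed in doCommandLine(); values here are illustrative only.
    String[] args = new String[] { "--recompareThreads=10", "--recompareTries=3",
        "--recompareSleep=100", "--recompareBackoffExponent=1",
        // placeholder peer cluster key and table name
        "zk1.example.com,zk2.example.com,zk3.example.com:2181:/cluster-b", "TestTable" };
    // VerifyReplication is a Tool, so ToolRunner parses generic options and invokes run(args)
    int exitCode = ToolRunner.run(conf, new VerifyReplication(), args);
    System.exit(exitCode);
  }
}
----

With --recompareThreads left at its default of 0, no executor is built and recompares run on the mapper thread, which matches the synchronous behaviour exercised in TestVerifyReplication.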
System.err.println(" recompareThreads number of threads to run recompares in"); + System.err.println(" recompareTries number of recompare attempts before incrementing " + + "the BADROWS counter. Defaults to 1 recompare"); + System.out.println(" recompareBackoffExponent exponential multiplier to increase " + + "recompareSleep after each recompare attempt, " + + "default value is 0 which results in a constant sleep time"); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -788,6 +883,27 @@ private static void printUsage(final String errorMsg) { + "2181:/cluster-b \\\n" + " TestTable"); } + private static ThreadPoolExecutor buildReCompareExecutor(int maxThreads, Mapper.Context context) { + if (maxThreads == 0) { + return null; + } + + return new ThreadPoolExecutor(0, maxThreads, 1L, TimeUnit.SECONDS, new SynchronousQueue<>(), + buildRejectedReComparePolicy(context)); + } + + private static CallerRunsPolicy buildRejectedReComparePolicy(Mapper.Context context) { + return new CallerRunsPolicy() { + @Override + public void rejectedExecution(Runnable runnable, ThreadPoolExecutor e) { + LOG.debug("Re-comparison execution rejected. Running in main thread."); + context.getCounter(Counters.MAIN_THREAD_RECOMPARES).increment(1); + // will run in the current thread + super.rejectedExecution(runnable, e); + } + }; + } + @Override public int run(String[] args) throws Exception { Configuration conf = this.getConf(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java new file mode 100644 index 000000000000..47f5e606b846 --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce.replication; + +import java.io.IOException; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class VerifyReplicationRecompareRunnable implements Runnable { + + private static final Logger LOG = + LoggerFactory.getLogger(VerifyReplicationRecompareRunnable.class); + + private final Mapper.Context context; + private final VerifyReplication.Verifier.Counters originalCounter; + private final String delimiter; + private final byte[] row; + private final Scan tableScan; + private final Table sourceTable; + private final Table replicatedTable; + + private final int reCompareTries; + private final int sleepMsBeforeReCompare; + private final int reCompareBackoffExponent; + private final boolean verbose; + + private Result sourceResult; + private Result replicatedResult; + + public VerifyReplicationRecompareRunnable(Mapper.Context context, Result sourceResult, + Result replicatedResult, VerifyReplication.Verifier.Counters originalCounter, String delimiter, + Scan tableScan, Table sourceTable, Table replicatedTable, int reCompareTries, + int sleepMsBeforeReCompare, int reCompareBackoffExponent, boolean verbose) { + this.context = context; + this.sourceResult = sourceResult; + this.replicatedResult = replicatedResult; + this.originalCounter = originalCounter; + this.delimiter = delimiter; + this.tableScan = tableScan; + this.sourceTable = sourceTable; + this.replicatedTable = replicatedTable; + this.reCompareTries = reCompareTries; + this.sleepMsBeforeReCompare = sleepMsBeforeReCompare; + this.reCompareBackoffExponent = reCompareBackoffExponent; + this.verbose = verbose; + this.row = VerifyReplication.getRow(sourceResult, replicatedResult); + } + + @Override + public void run() { + Get get = new Get(row); + get.setCacheBlocks(tableScan.getCacheBlocks()); + get.setFilter(tableScan.getFilter()); + + int sleepMs = sleepMsBeforeReCompare; + int tries = 0; + + while (++tries <= reCompareTries) { + context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).increment(1); + + try { + Thread.sleep(sleepMs); + } catch (InterruptedException e) { + LOG.warn("Sleeping interrupted, incrementing bad rows and aborting"); + incrementOriginalAndBadCounter(); + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + Thread.currentThread().interrupt(); + return; + } + + try { + if (fetchLatestRows(get) && matches(sourceResult, replicatedResult, null)) { + if (verbose) { + LOG.info("Good row key (with recompare): {}{}{}", delimiter, Bytes.toStringBinary(row), + delimiter); + } + context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).increment(1); + return; + } else { + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + } + } catch (IOException e) { + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + if (verbose) { + LOG.info("Got an exception during recompare for rowkey={}", Bytes.toStringBinary(row), e); + } + } + + sleepMs = sleepMs * (2 ^ reCompareBackoffExponent); + } + + LOG.error("{}, rowkey={}{}{}", originalCounter, delimiter, Bytes.toStringBinary(row), + delimiter); + 
incrementOriginalAndBadCounter(); + } + + public void fail() { + if (LOG.isDebugEnabled()) { + LOG.debug("Called fail on row={}", Bytes.toStringBinary(row)); + } + incrementOriginalAndBadCounter(); + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + } + + private boolean fetchLatestRows(Get get) throws IOException { + Result sourceResult = sourceTable.get(get); + Result replicatedResult = replicatedTable.get(get); + + boolean sourceMatches = matches(sourceResult, this.sourceResult, + VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED); + boolean replicatedMatches = matches(replicatedResult, this.replicatedResult, + VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED); + + this.sourceResult = sourceResult; + this.replicatedResult = replicatedResult; + return sourceMatches && replicatedMatches; + } + + private boolean matches(Result original, Result updated, + VerifyReplication.Verifier.Counters failCounter) { + try { + Result.compareResults(original, updated); + return true; + } catch (Exception e) { + if (failCounter != null) { + context.getCounter(failCounter).increment(1); + if (LOG.isDebugEnabled()) { + LOG.debug("{} for rowkey={}", failCounter, Bytes.toStringBinary(row)); + } + } + return false; + } + } + + private void incrementOriginalAndBadCounter() { + context.getCounter(originalCounter).increment(1); + context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).increment(1); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index ee77d9f6fccb..2958c5ef9114 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.junit.AfterClass; import org.junit.Before; @@ -108,7 +109,7 @@ public static void setUpBeforeClass() throws Exception { htable3 = connection2.getTable(peerTableName); } - static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) + static Counters runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) throws IOException, InterruptedException, ClassNotFoundException { Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args); if (job == null) { @@ -121,6 +122,7 @@ static void runVerifyReplication(String[] args, int expectedGoodRows, int expect job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(expectedBadRows, job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + return job.getCounters(); } /** @@ -447,6 +449,127 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti checkRestoreTmpDir(CONF2, tmpPath2, 2); } + @Test + public void testVerifyReplicationThreadedRecompares() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new 
Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + String[] args = new String[] { "--recompareThreads=10", "--recompareTries=3", + "--recompareSleep=1", "--peerTableName=" + peerTableName.getNameAsString(), + UTIL2.getClusterKey(), tableName.getNameAsString() }; + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 9); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + + @Test + public void testFailsRemainingComparesAfterShutdown() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + /** + * recompareSleep is set to exceed how long we wait on + * {@link VerifyReplication#reCompareExecutor} termination when doing cleanup. 
this allows us to + * test the counter-incrementing logic if the executor still hasn't terminated after the call to + * shutdown and awaitTermination + */ + String[] args = new String[] { "--recompareThreads=1", "--recompareTries=1", + "--recompareSleep=121000", "--peerTableName=" + peerTableName.getNameAsString(), + UTIL2.getClusterKey(), tableName.getNameAsString() }; + + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 3); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + + @Test + public void testVerifyReplicationSynchronousRecompares() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + String[] args = new String[] { "--recompareTries=3", "--recompareSleep=1", + "--peerTableName=" + peerTableName.getNameAsString(), UTIL2.getClusterKey(), + tableName.getNameAsString() }; + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 9); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + @AfterClass public static void tearDownAfterClass() throws Exception { htable3.close(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java new file mode 100644 index 000000000000..49c52fbcc3b3 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplicationRecompareRunnable; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.counters.GenericCounter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@Category({ ReplicationTests.class, SmallTests.class }) +@RunWith(MockitoJUnitRunner.class) +public class TestVerifyReplicationRecompareRunnable { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVerifyReplicationRecompareRunnable.class); + + @Mock + private Table sourceTable; + + @Mock + private Table replicatedTable; + + @Mock + private Mapper.Context context; + + static Result genResult(int cols) { + KeyValue[] kvs = new KeyValue[cols]; + + for (int i = 0; i < cols; ++i) { + kvs[i] = + new KeyValue(genBytes(), genBytes(), genBytes(), System.currentTimeMillis(), genBytes()); + } + + return Result.create(kvs); + } + + static byte[] genBytes() { + return Bytes.toBytes(ThreadLocalRandom.current().nextInt()); + } + + @Before + public void setUp() { + for (VerifyReplication.Verifier.Counters counter : VerifyReplication.Verifier.Counters + .values()) { + Counter emptyCounter = new GenericCounter(counter.name(), counter.name()); + when(context.getCounter(counter)).thenReturn(emptyCounter); + } + } + + @Test + public void itRecomparesGoodRow() throws IOException { + Result result = genResult(2); + + when(sourceTable.get(any(Get.class))).thenReturn(result); + when(replicatedTable.get(any(Get.class))).thenReturn(result); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", + new Scan(), sourceTable, replicatedTable, 3, 1, 0, true); + + runnable.run(); + + assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(0, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(1, 
context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); + assertEquals(2, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } + + @Test + public void itRecomparesBadRow() throws IOException { + Result replicatedResult = genResult(1); + when(sourceTable.get(any(Get.class))).thenReturn(genResult(5)); + when(replicatedTable.get(any(Get.class))).thenReturn(replicatedResult); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), replicatedResult, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, + "", new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); + + runnable.run(); + + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); + assertEquals(0, + context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } + + @Test + public void itHandlesExceptionOnRecompare() throws IOException { + when(sourceTable.get(any(Get.class))).thenThrow(new IOException("Error!")); + when(replicatedTable.get(any(Get.class))).thenReturn(genResult(5)); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", + new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); + + runnable.run(); + + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } +} From 2486d32fd005244a33401e8bea91a341ca59fac1 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Wed, 2 Aug 2023 09:18:30 -0400 Subject: [PATCH 036/514] HBASE-27896 Disable hdfs readahead for pread reads (#5336) Signed-off-by: Reid Chan --- .../org/apache/hadoop/hbase/regionserver/StoreFileInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 518210398d4e..1ebe93deff65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -73,7 +73,7 @@ public class StoreFileInfo implements Configurable { Pattern.compile(String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); public static final String STORE_FILE_READER_NO_READAHEAD = "hbase.store.reader.no-readahead"; - public static final boolean DEFAULT_STORE_FILE_READER_NO_READAHEAD = 
false; + public static final boolean DEFAULT_STORE_FILE_READER_NO_READAHEAD = true; // Configuration private Configuration conf; From 3ddc6dd70f3a9aa28338f43036db0bfd99bd0bad Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 4 Aug 2023 08:50:10 -0400 Subject: [PATCH 037/514] HBASE-27956 Support wall clock profiling in ProfilerServlet (#5337) Signed-off-by: Reid Chan --- .../main/java/org/apache/hadoop/hbase/http/ProfileServlet.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java index 2d87a2b3f26a..86f58c25bff2 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java @@ -74,6 +74,7 @@ public class ProfileServlet extends HttpServlet { enum Event { CPU("cpu"), + WALL("wall"), ALLOC("alloc"), LOCK("lock"), PAGE_FAULTS("page-faults"), From e8e128c16e435fef8a4516a08571696d1d143898 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Mon, 7 Aug 2023 08:09:58 -0400 Subject: [PATCH 038/514] Revert "VerifyReplication recompare async (#5051)" This reverts commit f6c5dbe97832d4c7d301bce48792365dd2b79e8a. --- .../replication/VerifyReplication.java | 186 ++++-------------- .../VerifyReplicationRecompareRunnable.java | 162 --------------- .../replication/TestVerifyReplication.java | 125 +----------- ...estVerifyReplicationRecompareRunnable.java | 154 --------------- 4 files changed, 36 insertions(+), 591 deletions(-) delete mode 100644 hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java delete mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 6e3650297bd3..1e268c1858b8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -19,12 +19,7 @@ import java.io.IOException; import java.util.Arrays; -import java.util.List; import java.util.UUID; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -35,6 +30,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -50,7 +46,6 @@ import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableSplit; -import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication.Verifier.Counters; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage; @@ -60,12 +55,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -89,11 +84,6 @@ public class VerifyReplication extends Configured implements Tool { public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; - private static ThreadPoolExecutor reCompareExecutor = null; - int reCompareTries = 0; - int reCompareBackoffExponent = 0; - int reCompareThreads = 0; - int sleepMsBeforeReCompare = 0; long startTime = 0; long endTime = Long.MAX_VALUE; int batch = -1; @@ -104,6 +94,7 @@ public class VerifyReplication extends Configured implements Tool { String peerId = null; String peerQuorumAddress = null; String rowPrefixes = null; + int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; // Source table snapshot name @@ -133,12 +124,7 @@ public enum Counters { BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, - CONTENT_DIFFERENT_ROWS, - RECOMPARES, - MAIN_THREAD_RECOMPARES, - SOURCE_ROW_CHANGED, - PEER_ROW_CHANGED, - FAILED_RECOMPARE + CONTENT_DIFFERENT_ROWS } private Connection sourceConnection; @@ -147,9 +133,6 @@ public enum Counters { private Table replicatedTable; private ResultScanner replicatedScanner; private Result currentCompareRowInPeerTable; - private Scan tableScan; - private int reCompareTries; - private int reCompareBackoffExponent; private int sleepMsBeforeReCompare; private String delimiter = ""; private boolean verbose = false; @@ -167,12 +150,7 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); - reCompareTries = conf.getInt(NAME + ".recompareTries", 0); - reCompareBackoffExponent = conf.getInt(NAME + ".recompareBackoffExponent", 1); sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); - if (sleepMsBeforeReCompare > 0) { - reCompareTries = Math.max(reCompareTries, 1); - } delimiter = conf.get(NAME + ".delimiter", ""); verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); @@ -201,12 +179,9 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) if (versions >= 0) { scan.readVersions(versions); } - int reCompareThreads = conf.getInt(NAME + ".recompareThreads", 0); - reCompareExecutor = buildReCompareExecutor(reCompareThreads, context); TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); sourceConnection = ConnectionFactory.createConnection(conf); sourceTable = sourceConnection.getTable(tableName); - tableScan = scan; final InputSplit tableSplit = context.getInputSplit(); @@ -251,7 +226,7 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) while (true) { if (currentCompareRowInPeerTable == null) { // reach the region end of peer table, row only in source table - 
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); break; } int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow()); @@ -265,77 +240,55 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { - logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value, - currentCompareRowInPeerTable); + logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); } currentCompareRowInPeerTable = replicatedScanner.next(); break; } else if (rowCmpRet < 0) { // row only exists in source table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); break; } else { // row only exists in peer table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } } } - @SuppressWarnings("FutureReturnValueIgnored") - private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row, - Result replicatedRow) { - byte[] rowKey = getRow(row, replicatedRow); - if (reCompareTries == 0) { - context.getCounter(counter).increment(1); - context.getCounter(Counters.BADROWS).increment(1); - LOG.error("{}, rowkey={}{}{}", counter, delimiter, Bytes.toStringBinary(rowKey), delimiter); - return; - } - - VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, - row, replicatedRow, counter, delimiter, tableScan, sourceTable, replicatedTable, - reCompareTries, sleepMsBeforeReCompare, reCompareBackoffExponent, verbose); - - if (reCompareExecutor == null) { - runnable.run(); - return; - } - - reCompareExecutor.submit(runnable); - } - - @Override - protected void cleanup(Context context) { - if (reCompareExecutor != null && !reCompareExecutor.isShutdown()) { - reCompareExecutor.shutdown(); + private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) { + if (sleepMsBeforeReCompare > 0) { + Threads.sleep(sleepMsBeforeReCompare); try { - boolean terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); - if (!terminated) { - List queue = reCompareExecutor.shutdownNow(); - for (Runnable runnable : queue) { - ((VerifyReplicationRecompareRunnable) runnable).fail(); - } - - terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); - - if (!terminated) { - int activeCount = Math.max(1, reCompareExecutor.getActiveCount()); - LOG.warn("Found {} possible recompares still running in the executable" - + " incrementing BADROWS and FAILED_RECOMPARE", activeCount); - context.getCounter(Counters.BADROWS).increment(activeCount); - context.getCounter(Counters.FAILED_RECOMPARE).increment(activeCount); + Result sourceResult = sourceTable.get(new Get(row.getRow())); + Result replicatedResult = replicatedTable.get(new Get(row.getRow())); + Result.compareResults(sourceResult, replicatedResult, false); + if (!sourceResult.isEmpty()) { + context.getCounter(Counters.GOODROWS).increment(1); + if (verbose) { + LOG.info("Good row key (with recompare): " + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } - } catch 
(InterruptedException e) { - throw new RuntimeException("Failed to await executor termination in cleanup", e); + return; + } catch (Exception e) { + LOG.error("recompare fail after sleep, rowkey=" + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } + context.getCounter(counter).increment(1); + context.getCounter(Counters.BADROWS).increment(1); + LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + + delimiter); + } + + @Override + protected void cleanup(Context context) { if (replicatedScanner != null) { try { while (currentCompareRowInPeerTable != null) { - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } @@ -471,10 +424,6 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); - conf.setInt(NAME + ".recompareTries", reCompareTries); - conf.setInt(NAME + ".recompareBackoffExponent", reCompareBackoffExponent); - conf.setInt(NAME + ".recompareThreads", reCompareThreads); - // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -542,15 +491,6 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce return job; } - protected static byte[] getRow(Result sourceResult, Result replicatedResult) { - if (sourceResult != null) { - return sourceResult.getRow(); - } else if (replicatedResult != null) { - return replicatedResult.getRow(); - } - throw new RuntimeException("Both sourceResult and replicatedResult are null!"); - } - private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { if (rowPrefixes != null && !rowPrefixes.isEmpty()) { String[] rowPrefixArray = rowPrefixes.split(","); @@ -635,20 +575,11 @@ public boolean doCommandLine(final String[] args) { continue; } - final String deprecatedSleepToReCompareKey = "--recomparesleep="; - final String sleepToReCompareKey = "--recompareSleep="; - if (cmd.startsWith(deprecatedSleepToReCompareKey)) { - LOG.warn("--recomparesleep is deprecated and will be removed in 4.0.0." 
- + " Use --recompareSleep instead."); - sleepMsBeforeReCompare = - Integer.parseInt(cmd.substring(deprecatedSleepToReCompareKey.length())); - continue; - } + final String sleepToReCompareKey = "--recomparesleep="; if (cmd.startsWith(sleepToReCompareKey)) { sleepMsBeforeReCompare = Integer.parseInt(cmd.substring(sleepToReCompareKey.length())); continue; } - final String verboseKey = "--verbose"; if (cmd.startsWith(verboseKey)) { verbose = true; @@ -697,25 +628,6 @@ public boolean doCommandLine(final String[] args) { continue; } - final String reCompareThreadArgs = "--recompareThreads="; - if (cmd.startsWith(reCompareThreadArgs)) { - reCompareThreads = Integer.parseInt(cmd.substring(reCompareThreadArgs.length())); - continue; - } - - final String reCompareTriesKey = "--recompareTries="; - if (cmd.startsWith(reCompareTriesKey)) { - reCompareTries = Integer.parseInt(cmd.substring(reCompareTriesKey.length())); - continue; - } - - final String reCompareBackoffExponentKey = "--recompareBackoffExponent="; - if (cmd.startsWith(reCompareBackoffExponentKey)) { - reCompareBackoffExponent = - Integer.parseInt(cmd.substring(reCompareBackoffExponentKey.length())); - continue; - } - if (cmd.startsWith("--")) { printUsage("Invalid argument '" + cmd + "'"); return false; @@ -792,8 +704,7 @@ private static void printUsage(final String errorMsg) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" - + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recompareSleep=] " - + "[--recompareThreads=] [--recompareTries=] [--recompareBackoffExponent=]" + + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); @@ -809,14 +720,8 @@ private static void printUsage(final String errorMsg) { System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recompareSleep milliseconds to sleep before recompare row, " + System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + "default value is 0 which disables the recompare."); - System.err.println(" recompareThreads number of threads to run recompares in"); - System.err.println(" recompareTries number of recompare attempts before incrementing " - + "the BADROWS counter. 
Defaults to 1 recompare"); - System.out.println(" recompareBackoffExponent exponential multiplier to increase " - + "recompareSleep after each recompare attempt, " - + "default value is 0 which results in a constant sleep time"); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -883,27 +788,6 @@ private static void printUsage(final String errorMsg) { + "2181:/cluster-b \\\n" + " TestTable"); } - private static ThreadPoolExecutor buildReCompareExecutor(int maxThreads, Mapper.Context context) { - if (maxThreads == 0) { - return null; - } - - return new ThreadPoolExecutor(0, maxThreads, 1L, TimeUnit.SECONDS, new SynchronousQueue<>(), - buildRejectedReComparePolicy(context)); - } - - private static CallerRunsPolicy buildRejectedReComparePolicy(Mapper.Context context) { - return new CallerRunsPolicy() { - @Override - public void rejectedExecution(Runnable runnable, ThreadPoolExecutor e) { - LOG.debug("Re-comparison execution rejected. Running in main thread."); - context.getCounter(Counters.MAIN_THREAD_RECOMPARES).increment(1); - // will run in the current thread - super.rejectedExecution(runnable, e); - } - }; - } - @Override public int run(String[] args) throws Exception { Configuration conf = this.getConf(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java deleted file mode 100644 index 47f5e606b846..000000000000 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.mapreduce.replication; - -import java.io.IOException; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.Mapper; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@InterfaceAudience.Private -public class VerifyReplicationRecompareRunnable implements Runnable { - - private static final Logger LOG = - LoggerFactory.getLogger(VerifyReplicationRecompareRunnable.class); - - private final Mapper.Context context; - private final VerifyReplication.Verifier.Counters originalCounter; - private final String delimiter; - private final byte[] row; - private final Scan tableScan; - private final Table sourceTable; - private final Table replicatedTable; - - private final int reCompareTries; - private final int sleepMsBeforeReCompare; - private final int reCompareBackoffExponent; - private final boolean verbose; - - private Result sourceResult; - private Result replicatedResult; - - public VerifyReplicationRecompareRunnable(Mapper.Context context, Result sourceResult, - Result replicatedResult, VerifyReplication.Verifier.Counters originalCounter, String delimiter, - Scan tableScan, Table sourceTable, Table replicatedTable, int reCompareTries, - int sleepMsBeforeReCompare, int reCompareBackoffExponent, boolean verbose) { - this.context = context; - this.sourceResult = sourceResult; - this.replicatedResult = replicatedResult; - this.originalCounter = originalCounter; - this.delimiter = delimiter; - this.tableScan = tableScan; - this.sourceTable = sourceTable; - this.replicatedTable = replicatedTable; - this.reCompareTries = reCompareTries; - this.sleepMsBeforeReCompare = sleepMsBeforeReCompare; - this.reCompareBackoffExponent = reCompareBackoffExponent; - this.verbose = verbose; - this.row = VerifyReplication.getRow(sourceResult, replicatedResult); - } - - @Override - public void run() { - Get get = new Get(row); - get.setCacheBlocks(tableScan.getCacheBlocks()); - get.setFilter(tableScan.getFilter()); - - int sleepMs = sleepMsBeforeReCompare; - int tries = 0; - - while (++tries <= reCompareTries) { - context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).increment(1); - - try { - Thread.sleep(sleepMs); - } catch (InterruptedException e) { - LOG.warn("Sleeping interrupted, incrementing bad rows and aborting"); - incrementOriginalAndBadCounter(); - context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); - Thread.currentThread().interrupt(); - return; - } - - try { - if (fetchLatestRows(get) && matches(sourceResult, replicatedResult, null)) { - if (verbose) { - LOG.info("Good row key (with recompare): {}{}{}", delimiter, Bytes.toStringBinary(row), - delimiter); - } - context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).increment(1); - return; - } else { - context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); - } - } catch (IOException e) { - context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); - if (verbose) { - LOG.info("Got an exception during recompare for rowkey={}", Bytes.toStringBinary(row), e); - } - } - - sleepMs = sleepMs * (2 ^ reCompareBackoffExponent); - } - - LOG.error("{}, rowkey={}{}{}", originalCounter, delimiter, Bytes.toStringBinary(row), - delimiter); - 
incrementOriginalAndBadCounter(); - } - - public void fail() { - if (LOG.isDebugEnabled()) { - LOG.debug("Called fail on row={}", Bytes.toStringBinary(row)); - } - incrementOriginalAndBadCounter(); - context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); - } - - private boolean fetchLatestRows(Get get) throws IOException { - Result sourceResult = sourceTable.get(get); - Result replicatedResult = replicatedTable.get(get); - - boolean sourceMatches = matches(sourceResult, this.sourceResult, - VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED); - boolean replicatedMatches = matches(replicatedResult, this.replicatedResult, - VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED); - - this.sourceResult = sourceResult; - this.replicatedResult = replicatedResult; - return sourceMatches && replicatedMatches; - } - - private boolean matches(Result original, Result updated, - VerifyReplication.Verifier.Counters failCounter) { - try { - Result.compareResults(original, updated); - return true; - } catch (Exception e) { - if (failCounter != null) { - context.getCounter(failCounter).increment(1); - if (LOG.isDebugEnabled()) { - LOG.debug("{} for rowkey={}", failCounter, Bytes.toStringBinary(row)); - } - } - return false; - } - } - - private void incrementOriginalAndBadCounter() { - context.getCounter(originalCounter).increment(1); - context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).increment(1); - } -} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index 2958c5ef9114..ee77d9f6fccb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.junit.AfterClass; import org.junit.Before; @@ -109,7 +108,7 @@ public static void setUpBeforeClass() throws Exception { htable3 = connection2.getTable(peerTableName); } - static Counters runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) + static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) throws IOException, InterruptedException, ClassNotFoundException { Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args); if (job == null) { @@ -122,7 +121,6 @@ static Counters runVerifyReplication(String[] args, int expectedGoodRows, int ex job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(expectedBadRows, job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); - return job.getCounters(); } /** @@ -449,127 +447,6 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti checkRestoreTmpDir(CONF2, tmpPath2, 2); } - @Test - public void testVerifyReplicationThreadedRecompares() throws Exception { - // Populate the tables with same data - runBatchCopyTest(); - - // ONLY_IN_PEER_TABLE_ROWS - Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); - put.addColumn(noRepfamName, row, row); - htable3.put(put); - - // CONTENT_DIFFERENT_ROWS - put = new 
Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); - put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); - htable3.put(put); - - // ONLY_IN_SOURCE_TABLE_ROWS - put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); - put.addColumn(noRepfamName, row, row); - htable1.put(put); - - String[] args = new String[] { "--recompareThreads=10", "--recompareTries=3", - "--recompareSleep=1", "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; - Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 9); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); - } - - @Test - public void testFailsRemainingComparesAfterShutdown() throws Exception { - // Populate the tables with same data - runBatchCopyTest(); - - // ONLY_IN_PEER_TABLE_ROWS - Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); - put.addColumn(noRepfamName, row, row); - htable3.put(put); - - // CONTENT_DIFFERENT_ROWS - put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); - put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); - htable3.put(put); - - // ONLY_IN_SOURCE_TABLE_ROWS - put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); - put.addColumn(noRepfamName, row, row); - htable1.put(put); - - /** - * recompareSleep is set to exceed how long we wait on - * {@link VerifyReplication#reCompareExecutor} termination when doing cleanup. 
this allows us to - * test the counter-incrementing logic if the executor still hasn't terminated after the call to - * shutdown and awaitTermination - */ - String[] args = new String[] { "--recompareThreads=1", "--recompareTries=1", - "--recompareSleep=121000", "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; - - Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 3); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); - } - - @Test - public void testVerifyReplicationSynchronousRecompares() throws Exception { - // Populate the tables with same data - runBatchCopyTest(); - - // ONLY_IN_PEER_TABLE_ROWS - Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); - put.addColumn(noRepfamName, row, row); - htable3.put(put); - - // CONTENT_DIFFERENT_ROWS - put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); - put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); - htable3.put(put); - - // ONLY_IN_SOURCE_TABLE_ROWS - put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); - put.addColumn(noRepfamName, row, row); - htable1.put(put); - - String[] args = new String[] { "--recompareTries=3", "--recompareSleep=1", - "--peerTableName=" + peerTableName.getNameAsString(), UTIL2.getClusterKey(), - tableName.getNameAsString() }; - Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 9); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); - } - @AfterClass public static void tearDownAfterClass() throws Exception { htable3.close(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java deleted file mode 100644 index 49c52fbcc3b3..000000000000 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.replication; - -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; -import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplicationRecompareRunnable; -import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.Counter; -import org.apache.hadoop.mapreduce.Mapper; -import org.apache.hadoop.mapreduce.counters.GenericCounter; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@Category({ ReplicationTests.class, SmallTests.class }) -@RunWith(MockitoJUnitRunner.class) -public class TestVerifyReplicationRecompareRunnable { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationRecompareRunnable.class); - - @Mock - private Table sourceTable; - - @Mock - private Table replicatedTable; - - @Mock - private Mapper.Context context; - - static Result genResult(int cols) { - KeyValue[] kvs = new KeyValue[cols]; - - for (int i = 0; i < cols; ++i) { - kvs[i] = - new KeyValue(genBytes(), genBytes(), genBytes(), System.currentTimeMillis(), genBytes()); - } - - return Result.create(kvs); - } - - static byte[] genBytes() { - return Bytes.toBytes(ThreadLocalRandom.current().nextInt()); - } - - @Before - public void setUp() { - for (VerifyReplication.Verifier.Counters counter : VerifyReplication.Verifier.Counters - .values()) { - Counter emptyCounter = new GenericCounter(counter.name(), counter.name()); - when(context.getCounter(counter)).thenReturn(emptyCounter); - } - } - - @Test - public void itRecomparesGoodRow() throws IOException { - Result result = genResult(2); - - when(sourceTable.get(any(Get.class))).thenReturn(result); - when(replicatedTable.get(any(Get.class))).thenReturn(result); - - VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, - genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", - new Scan(), sourceTable, replicatedTable, 3, 1, 0, true); - - runnable.run(); - - assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); - assertEquals(0, - context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); - assertEquals(1, 
context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); - assertEquals(2, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); - } - - @Test - public void itRecomparesBadRow() throws IOException { - Result replicatedResult = genResult(1); - when(sourceTable.get(any(Get.class))).thenReturn(genResult(5)); - when(replicatedTable.get(any(Get.class))).thenReturn(replicatedResult); - - VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, - genResult(5), replicatedResult, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, - "", new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); - - runnable.run(); - - assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); - assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); - assertEquals(0, - context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); - assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); - } - - @Test - public void itHandlesExceptionOnRecompare() throws IOException { - when(sourceTable.get(any(Get.class))).thenThrow(new IOException("Error!")); - when(replicatedTable.get(any(Get.class))).thenReturn(genResult(5)); - - VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, - genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", - new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); - - runnable.run(); - - assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); - assertEquals(1, - context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); - assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); - } -} From 7cfa47dfe000c74db1ba9292ad10bed9e48f6c3b Mon Sep 17 00:00:00 2001 From: Hernan Romer Date: Mon, 31 Jul 2023 17:50:57 -0400 Subject: [PATCH 039/514] HBASE-26874 VerifyReplication recompare async (#5051) Signed-off-by: Bryan Beaudreault --- .../replication/VerifyReplication.java | 186 ++++++++++++++---- .../VerifyReplicationRecompareRunnable.java | 162 +++++++++++++++ .../replication/TestVerifyReplication.java | 125 +++++++++++- ...estVerifyReplicationRecompareRunnable.java | 154 +++++++++++++++ 4 files changed, 591 insertions(+), 36 deletions(-) create mode 100644 hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 1e268c1858b8..6e3650297bd3 100644 --- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -19,7 +19,12 @@ import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.UUID; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -30,7 +35,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -46,6 +50,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableSplit; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication.Verifier.Counters; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; @@ -55,12 +60,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -84,6 +89,11 @@ public class VerifyReplication extends Configured implements Tool { public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; + private static ThreadPoolExecutor reCompareExecutor = null; + int reCompareTries = 0; + int reCompareBackoffExponent = 0; + int reCompareThreads = 0; + int sleepMsBeforeReCompare = 0; long startTime = 0; long endTime = Long.MAX_VALUE; int batch = -1; @@ -94,7 +104,6 @@ public class VerifyReplication extends Configured implements Tool { String peerId = null; String peerQuorumAddress = null; String rowPrefixes = null; - int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; // Source table snapshot name @@ -124,7 +133,12 @@ public enum Counters { BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, - CONTENT_DIFFERENT_ROWS + CONTENT_DIFFERENT_ROWS, + RECOMPARES, + MAIN_THREAD_RECOMPARES, + SOURCE_ROW_CHANGED, + PEER_ROW_CHANGED, + FAILED_RECOMPARE } private Connection sourceConnection; @@ -133,6 +147,9 @@ public enum Counters { private Table replicatedTable; private ResultScanner replicatedScanner; private Result currentCompareRowInPeerTable; + private Scan tableScan; + private int reCompareTries; + private int reCompareBackoffExponent; private int sleepMsBeforeReCompare; private String delimiter = ""; private boolean verbose = false; @@ -150,7 +167,12 @@ public void 
map(ImmutableBytesWritable row, final Result value, Context context) throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); + reCompareTries = conf.getInt(NAME + ".recompareTries", 0); + reCompareBackoffExponent = conf.getInt(NAME + ".recompareBackoffExponent", 1); sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); + if (sleepMsBeforeReCompare > 0) { + reCompareTries = Math.max(reCompareTries, 1); + } delimiter = conf.get(NAME + ".delimiter", ""); verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); @@ -179,9 +201,12 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) if (versions >= 0) { scan.readVersions(versions); } + int reCompareThreads = conf.getInt(NAME + ".recompareThreads", 0); + reCompareExecutor = buildReCompareExecutor(reCompareThreads, context); TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); sourceConnection = ConnectionFactory.createConnection(conf); sourceTable = sourceConnection.getTable(tableName); + tableScan = scan; final InputSplit tableSplit = context.getInputSplit(); @@ -226,7 +251,7 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) while (true) { if (currentCompareRowInPeerTable == null) { // reach the region end of peer table, row only in source table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); break; } int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow()); @@ -240,55 +265,77 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { - logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value, + currentCompareRowInPeerTable); } currentCompareRowInPeerTable = replicatedScanner.next(); break; } else if (rowCmpRet < 0) { // row only exists in source table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value); + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null); break; } else { // row only exists in peer table - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } } } - private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) { - if (sleepMsBeforeReCompare > 0) { - Threads.sleep(sleepMsBeforeReCompare); - try { - Result sourceResult = sourceTable.get(new Get(row.getRow())); - Result replicatedResult = replicatedTable.get(new Get(row.getRow())); - Result.compareResults(sourceResult, replicatedResult, false); - if (!sourceResult.isEmpty()) { - context.getCounter(Counters.GOODROWS).increment(1); - if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter - + Bytes.toStringBinary(row.getRow()) + delimiter); - } - } - return; - } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter - + Bytes.toStringBinary(row.getRow()) + delimiter); - } + @SuppressWarnings("FutureReturnValueIgnored") + private void logFailRowAndIncreaseCounter(Context context, Counters 
counter, Result row, + Result replicatedRow) { + byte[] rowKey = getRow(row, replicatedRow); + if (reCompareTries == 0) { + context.getCounter(counter).increment(1); + context.getCounter(Counters.BADROWS).increment(1); + LOG.error("{}, rowkey={}{}{}", counter, delimiter, Bytes.toStringBinary(rowKey), delimiter); + return; + } + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + row, replicatedRow, counter, delimiter, tableScan, sourceTable, replicatedTable, + reCompareTries, sleepMsBeforeReCompare, reCompareBackoffExponent, verbose); + + if (reCompareExecutor == null) { + runnable.run(); + return; } - context.getCounter(counter).increment(1); - context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) - + delimiter); + + reCompareExecutor.submit(runnable); } @Override protected void cleanup(Context context) { + if (reCompareExecutor != null && !reCompareExecutor.isShutdown()) { + reCompareExecutor.shutdown(); + try { + boolean terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); + if (!terminated) { + List queue = reCompareExecutor.shutdownNow(); + for (Runnable runnable : queue) { + ((VerifyReplicationRecompareRunnable) runnable).fail(); + } + + terminated = reCompareExecutor.awaitTermination(1, TimeUnit.MINUTES); + + if (!terminated) { + int activeCount = Math.max(1, reCompareExecutor.getActiveCount()); + LOG.warn("Found {} possible recompares still running in the executable" + + " incrementing BADROWS and FAILED_RECOMPARE", activeCount); + context.getCounter(Counters.BADROWS).increment(activeCount); + context.getCounter(Counters.FAILED_RECOMPARE).increment(activeCount); + } + } + } catch (InterruptedException e) { + throw new RuntimeException("Failed to await executor termination in cleanup", e); + } + } if (replicatedScanner != null) { try { while (currentCompareRowInPeerTable != null) { - logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, + logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, currentCompareRowInPeerTable); currentCompareRowInPeerTable = replicatedScanner.next(); } @@ -424,6 +471,10 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); + conf.setInt(NAME + ".recompareTries", reCompareTries); + conf.setInt(NAME + ".recompareBackoffExponent", reCompareBackoffExponent); + conf.setInt(NAME + ".recompareThreads", reCompareThreads); + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -491,6 +542,15 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce return job; } + protected static byte[] getRow(Result sourceResult, Result replicatedResult) { + if (sourceResult != null) { + return sourceResult.getRow(); + } else if (replicatedResult != null) { + return replicatedResult.getRow(); + } + throw new RuntimeException("Both sourceResult and replicatedResult are null!"); + } + private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { if (rowPrefixes != null && !rowPrefixes.isEmpty()) { String[] rowPrefixArray = rowPrefixes.split(","); @@ -575,11 +635,20 @@ public boolean doCommandLine(final String[] args) { continue; } - final String sleepToReCompareKey = "--recomparesleep="; + final String deprecatedSleepToReCompareKey = "--recomparesleep="; + final 
String sleepToReCompareKey = "--recompareSleep="; + if (cmd.startsWith(deprecatedSleepToReCompareKey)) { + LOG.warn("--recomparesleep is deprecated and will be removed in 4.0.0." + + " Use --recompareSleep instead."); + sleepMsBeforeReCompare = + Integer.parseInt(cmd.substring(deprecatedSleepToReCompareKey.length())); + continue; + } if (cmd.startsWith(sleepToReCompareKey)) { sleepMsBeforeReCompare = Integer.parseInt(cmd.substring(sleepToReCompareKey.length())); continue; } + final String verboseKey = "--verbose"; if (cmd.startsWith(verboseKey)) { verbose = true; @@ -628,6 +697,25 @@ public boolean doCommandLine(final String[] args) { continue; } + final String reCompareThreadArgs = "--recompareThreads="; + if (cmd.startsWith(reCompareThreadArgs)) { + reCompareThreads = Integer.parseInt(cmd.substring(reCompareThreadArgs.length())); + continue; + } + + final String reCompareTriesKey = "--recompareTries="; + if (cmd.startsWith(reCompareTriesKey)) { + reCompareTries = Integer.parseInt(cmd.substring(reCompareTriesKey.length())); + continue; + } + + final String reCompareBackoffExponentKey = "--recompareBackoffExponent="; + if (cmd.startsWith(reCompareBackoffExponentKey)) { + reCompareBackoffExponent = + Integer.parseInt(cmd.substring(reCompareBackoffExponentKey.length())); + continue; + } + if (cmd.startsWith("--")) { printUsage("Invalid argument '" + cmd + "'"); return false; @@ -704,7 +792,8 @@ private static void printUsage(final String errorMsg) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" - + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " + + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recompareSleep=] " + + "[--recompareThreads=] [--recompareTries=] [--recompareBackoffExponent=]" + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] " + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] " + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] "); @@ -720,8 +809,14 @@ private static void printUsage(final String errorMsg) { System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + System.err.println(" recompareSleep milliseconds to sleep before recompare row, " + "default value is 0 which disables the recompare."); + System.err.println(" recompareThreads number of threads to run recompares in"); + System.err.println(" recompareTries number of recompare attempts before incrementing " + + "the BADROWS counter. 
Defaults to 1 recompare"); + System.out.println(" recompareBackoffExponent exponential multiplier to increase " + + "recompareSleep after each recompare attempt, " + + "default value is 0 which results in a constant sleep time"); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -788,6 +883,27 @@ private static void printUsage(final String errorMsg) { + "2181:/cluster-b \\\n" + " TestTable"); } + private static ThreadPoolExecutor buildReCompareExecutor(int maxThreads, Mapper.Context context) { + if (maxThreads == 0) { + return null; + } + + return new ThreadPoolExecutor(0, maxThreads, 1L, TimeUnit.SECONDS, new SynchronousQueue<>(), + buildRejectedReComparePolicy(context)); + } + + private static CallerRunsPolicy buildRejectedReComparePolicy(Mapper.Context context) { + return new CallerRunsPolicy() { + @Override + public void rejectedExecution(Runnable runnable, ThreadPoolExecutor e) { + LOG.debug("Re-comparison execution rejected. Running in main thread."); + context.getCounter(Counters.MAIN_THREAD_RECOMPARES).increment(1); + // will run in the current thread + super.rejectedExecution(runnable, e); + } + }; + } + @Override public int run(String[] args) throws Exception { Configuration conf = this.getConf(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java new file mode 100644 index 000000000000..47f5e606b846 --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplicationRecompareRunnable.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce.replication; + +import java.io.IOException; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class VerifyReplicationRecompareRunnable implements Runnable { + + private static final Logger LOG = + LoggerFactory.getLogger(VerifyReplicationRecompareRunnable.class); + + private final Mapper.Context context; + private final VerifyReplication.Verifier.Counters originalCounter; + private final String delimiter; + private final byte[] row; + private final Scan tableScan; + private final Table sourceTable; + private final Table replicatedTable; + + private final int reCompareTries; + private final int sleepMsBeforeReCompare; + private final int reCompareBackoffExponent; + private final boolean verbose; + + private Result sourceResult; + private Result replicatedResult; + + public VerifyReplicationRecompareRunnable(Mapper.Context context, Result sourceResult, + Result replicatedResult, VerifyReplication.Verifier.Counters originalCounter, String delimiter, + Scan tableScan, Table sourceTable, Table replicatedTable, int reCompareTries, + int sleepMsBeforeReCompare, int reCompareBackoffExponent, boolean verbose) { + this.context = context; + this.sourceResult = sourceResult; + this.replicatedResult = replicatedResult; + this.originalCounter = originalCounter; + this.delimiter = delimiter; + this.tableScan = tableScan; + this.sourceTable = sourceTable; + this.replicatedTable = replicatedTable; + this.reCompareTries = reCompareTries; + this.sleepMsBeforeReCompare = sleepMsBeforeReCompare; + this.reCompareBackoffExponent = reCompareBackoffExponent; + this.verbose = verbose; + this.row = VerifyReplication.getRow(sourceResult, replicatedResult); + } + + @Override + public void run() { + Get get = new Get(row); + get.setCacheBlocks(tableScan.getCacheBlocks()); + get.setFilter(tableScan.getFilter()); + + int sleepMs = sleepMsBeforeReCompare; + int tries = 0; + + while (++tries <= reCompareTries) { + context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).increment(1); + + try { + Thread.sleep(sleepMs); + } catch (InterruptedException e) { + LOG.warn("Sleeping interrupted, incrementing bad rows and aborting"); + incrementOriginalAndBadCounter(); + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + Thread.currentThread().interrupt(); + return; + } + + try { + if (fetchLatestRows(get) && matches(sourceResult, replicatedResult, null)) { + if (verbose) { + LOG.info("Good row key (with recompare): {}{}{}", delimiter, Bytes.toStringBinary(row), + delimiter); + } + context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).increment(1); + return; + } else { + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + } + } catch (IOException e) { + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + if (verbose) { + LOG.info("Got an exception during recompare for rowkey={}", Bytes.toStringBinary(row), e); + } + } + + sleepMs = sleepMs * (2 ^ reCompareBackoffExponent); + } + + LOG.error("{}, rowkey={}{}{}", originalCounter, delimiter, Bytes.toStringBinary(row), + delimiter); + 
incrementOriginalAndBadCounter(); + } + + public void fail() { + if (LOG.isDebugEnabled()) { + LOG.debug("Called fail on row={}", Bytes.toStringBinary(row)); + } + incrementOriginalAndBadCounter(); + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).increment(1); + } + + private boolean fetchLatestRows(Get get) throws IOException { + Result sourceResult = sourceTable.get(get); + Result replicatedResult = replicatedTable.get(get); + + boolean sourceMatches = matches(sourceResult, this.sourceResult, + VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED); + boolean replicatedMatches = matches(replicatedResult, this.replicatedResult, + VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED); + + this.sourceResult = sourceResult; + this.replicatedResult = replicatedResult; + return sourceMatches && replicatedMatches; + } + + private boolean matches(Result original, Result updated, + VerifyReplication.Verifier.Counters failCounter) { + try { + Result.compareResults(original, updated); + return true; + } catch (Exception e) { + if (failCounter != null) { + context.getCounter(failCounter).increment(1); + if (LOG.isDebugEnabled()) { + LOG.debug("{} for rowkey={}", failCounter, Bytes.toStringBinary(row)); + } + } + return false; + } + } + + private void incrementOriginalAndBadCounter() { + context.getCounter(originalCounter).increment(1); + context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).increment(1); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index ee77d9f6fccb..2958c5ef9114 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.junit.AfterClass; import org.junit.Before; @@ -108,7 +109,7 @@ public static void setUpBeforeClass() throws Exception { htable3 = connection2.getTable(peerTableName); } - static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) + static Counters runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) throws IOException, InterruptedException, ClassNotFoundException { Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args); if (job == null) { @@ -121,6 +122,7 @@ static void runVerifyReplication(String[] args, int expectedGoodRows, int expect job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(expectedBadRows, job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + return job.getCounters(); } /** @@ -447,6 +449,127 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti checkRestoreTmpDir(CONF2, tmpPath2, 2); } + @Test + public void testVerifyReplicationThreadedRecompares() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new 
Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + String[] args = new String[] { "--recompareThreads=10", "--recompareTries=3", + "--recompareSleep=1", "--peerTableName=" + peerTableName.getNameAsString(), + UTIL2.getClusterKey(), tableName.getNameAsString() }; + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 9); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + + @Test + public void testFailsRemainingComparesAfterShutdown() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + /** + * recompareSleep is set to exceed how long we wait on + * {@link VerifyReplication#reCompareExecutor} termination when doing cleanup. 
this allows us to + * test the counter-incrementing logic if the executor still hasn't terminated after the call to + * shutdown and awaitTermination + */ + String[] args = new String[] { "--recompareThreads=1", "--recompareTries=1", + "--recompareSleep=121000", "--peerTableName=" + peerTableName.getNameAsString(), + UTIL2.getClusterKey(), tableName.getNameAsString() }; + + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 3); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + + @Test + public void testVerifyReplicationSynchronousRecompares() throws Exception { + // Populate the tables with same data + runBatchCopyTest(); + + // ONLY_IN_PEER_TABLE_ROWS + Put put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH))); + put.addColumn(noRepfamName, row, row); + htable3.put(put); + + // CONTENT_DIFFERENT_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH - 1))); + put.addColumn(noRepfamName, row, Bytes.toBytes("diff value")); + htable3.put(put); + + // ONLY_IN_SOURCE_TABLE_ROWS + put = new Put(Bytes.toBytes(Integer.toString(NB_ROWS_IN_BATCH + 1))); + put.addColumn(noRepfamName, row, row); + htable1.put(put); + + String[] args = new String[] { "--recompareTries=3", "--recompareSleep=1", + "--peerTableName=" + peerTableName.getNameAsString(), UTIL2.getClusterKey(), + tableName.getNameAsString() }; + Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), + 9); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), + 1); + assertEquals( + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), + 1); + assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) + .getValue(), 1); + } + @AfterClass public static void tearDownAfterClass() throws Exception { htable3.close(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java new file mode 100644 index 000000000000..49c52fbcc3b3 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; +import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplicationRecompareRunnable; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.counters.GenericCounter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@Category({ ReplicationTests.class, SmallTests.class }) +@RunWith(MockitoJUnitRunner.class) +public class TestVerifyReplicationRecompareRunnable { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVerifyReplicationRecompareRunnable.class); + + @Mock + private Table sourceTable; + + @Mock + private Table replicatedTable; + + @Mock + private Mapper.Context context; + + static Result genResult(int cols) { + KeyValue[] kvs = new KeyValue[cols]; + + for (int i = 0; i < cols; ++i) { + kvs[i] = + new KeyValue(genBytes(), genBytes(), genBytes(), System.currentTimeMillis(), genBytes()); + } + + return Result.create(kvs); + } + + static byte[] genBytes() { + return Bytes.toBytes(ThreadLocalRandom.current().nextInt()); + } + + @Before + public void setUp() { + for (VerifyReplication.Verifier.Counters counter : VerifyReplication.Verifier.Counters + .values()) { + Counter emptyCounter = new GenericCounter(counter.name(), counter.name()); + when(context.getCounter(counter)).thenReturn(emptyCounter); + } + } + + @Test + public void itRecomparesGoodRow() throws IOException { + Result result = genResult(2); + + when(sourceTable.get(any(Get.class))).thenReturn(result); + when(replicatedTable.get(any(Get.class))).thenReturn(result); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", + new Scan(), sourceTable, replicatedTable, 3, 1, 0, true); + + runnable.run(); + + assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(0, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(1, 
context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); + assertEquals(2, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } + + @Test + public void itRecomparesBadRow() throws IOException { + Result replicatedResult = genResult(1); + when(sourceTable.get(any(Get.class))).thenReturn(genResult(5)); + when(replicatedTable.get(any(Get.class))).thenReturn(replicatedResult); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), replicatedResult, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, + "", new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); + + runnable.run(); + + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(0, context.getCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.SOURCE_ROW_CHANGED).getValue()); + assertEquals(0, + context.getCounter(VerifyReplication.Verifier.Counters.PEER_ROW_CHANGED).getValue()); + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } + + @Test + public void itHandlesExceptionOnRecompare() throws IOException { + when(sourceTable.get(any(Get.class))).thenThrow(new IOException("Error!")); + when(replicatedTable.get(any(Get.class))).thenReturn(genResult(5)); + + VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, + genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", + new Scan(), sourceTable, replicatedTable, 1, 1, 0, true); + + runnable.run(); + + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); + assertEquals(1, + context.getCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); + assertEquals(1, context.getCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + } +} From ab4b1d884f13d082a0906c3c3e9ec96bce7f3717 Mon Sep 17 00:00:00 2001 From: guluo Date: Thu, 10 Aug 2023 01:49:39 +0800 Subject: [PATCH 040/514] HBASE-28011 : The logStats about LruBlockCache is not accurate (#5344) --- .../apache/hadoop/hbase/io/hfile/LruBlockCache.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 2b79121a3aa6..9691217eec17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1015,12 +1015,13 @@ public void run() { public void logStats() { // Log size - long totalSize = heapSize(); - long freeSize = maxSize - totalSize; - LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" - + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " - + "blockCount=" + 
getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " - + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + long usedSize = heapSize(); + long freeSize = maxSize - usedSize; + LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(maxSize) + ", " + "usedSize=" + + StringUtils.byteDesc(usedSize) + ", " + "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + "blockCount=" + getBlockCount() + ", " + + "accesses=" + stats.getRequestCount() + ", " + "hits=" + stats.getHitCount() + ", " + + "hitRatio=" + (stats.getHitCount() == 0 ? "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) From 36c158fce27a2f2dd7832e77538486ebccb2f42d Mon Sep 17 00:00:00 2001 From: terrytlu <112375429+terrytlu@users.noreply.github.com> Date: Sun, 13 Aug 2023 17:51:51 +0800 Subject: [PATCH 041/514] HBASE-27979 some syntax errors detected by IDEA in pom.xml (#5327) Signed-off-by: Duo Zhang --- hbase-annotations/pom.xml | 4 ++-- hbase-archetypes/hbase-archetype-builder/pom.xml | 4 ++-- hbase-archetypes/hbase-client-project/pom.xml | 4 ++-- hbase-archetypes/hbase-shaded-client-project/pom.xml | 4 ++-- hbase-archetypes/pom.xml | 2 +- hbase-assembly/pom.xml | 2 +- hbase-asyncfs/pom.xml | 2 +- hbase-backup/pom.xml | 2 +- hbase-balancer/pom.xml | 2 +- hbase-build-configuration/pom.xml | 4 ++-- hbase-checkstyle/pom.xml | 4 ++-- hbase-client/pom.xml | 2 +- hbase-common/pom.xml | 2 +- hbase-compression/hbase-compression-aircompressor/pom.xml | 2 +- hbase-compression/hbase-compression-brotli/pom.xml | 2 +- hbase-compression/hbase-compression-lz4/pom.xml | 2 +- hbase-compression/hbase-compression-snappy/pom.xml | 2 +- hbase-compression/hbase-compression-xz/pom.xml | 2 +- hbase-compression/hbase-compression-zstd/pom.xml | 2 +- hbase-endpoint/pom.xml | 2 +- hbase-examples/pom.xml | 2 +- hbase-external-blockcache/pom.xml | 2 +- hbase-hadoop-compat/pom.xml | 2 +- hbase-http/pom.xml | 2 +- hbase-it/pom.xml | 2 +- hbase-logging/pom.xml | 2 +- hbase-mapreduce/pom.xml | 2 +- hbase-metrics-api/pom.xml | 2 +- hbase-metrics/pom.xml | 2 +- hbase-procedure/pom.xml | 2 +- hbase-protocol-shaded/pom.xml | 2 +- hbase-replication/pom.xml | 2 +- hbase-resource-bundle/pom.xml | 2 +- hbase-rest/pom.xml | 2 +- hbase-server/pom.xml | 2 +- hbase-shaded/hbase-shaded-check-invariants/pom.xml | 4 ++-- hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml | 4 ++-- hbase-shaded/hbase-shaded-client/pom.xml | 4 ++-- hbase-shaded/hbase-shaded-mapreduce/pom.xml | 4 ++-- hbase-shaded/hbase-shaded-testing-util/pom.xml | 2 +- .../hbase-shaded-with-hadoop-check-invariants/pom.xml | 4 ++-- hbase-shaded/pom.xml | 2 +- hbase-shell/pom.xml | 2 +- hbase-testing-util/pom.xml | 2 +- hbase-thrift/pom.xml | 2 +- hbase-zookeeper/pom.xml | 2 +- pom.xml | 2 +- 47 files changed, 58 insertions(+), 58 deletions(-) diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 99389c91a2ea..2dc7c6102722 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -1,5 +1,5 @@ - + - + 4.0.0 org.apache.hbase hbase ${revision} - ../.. + ../../pom.xml hbase-shaded-check-invariants pom diff --git a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml index df2f394c1a92..1fe319ba0af9 100644 --- a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml +++ b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml @@ -1,5 +1,5 @@ - + - + 4.0.0 org.apache.hbase hbase ${revision} - ../.. 
+ ../../pom.xml hbase-shaded-with-hadoop-check-invariants pom diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index 6477a16a877c..151cc4e49a4b 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -1,5 +1,5 @@ - + - - io.netty - netty - 3.6.2.Final - test - org.apache.hadoop hadoop-minikdc From 20c9e4ba5f66dd959e7c62a1a8090164a329f571 Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Mon, 14 Aug 2023 21:52:03 +0800 Subject: [PATCH 051/514] HBASE-26867 Introduce a FlushProcedure (#5256) Co-authored-by: huiruan <876107431@qq.com> Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/Admin.java | 9 + .../hbase/client/AdminOverAsyncAdmin.java | 5 + .../hadoop/hbase/client/AsyncAdmin.java | 8 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 + .../hbase/client/RawAsyncHBaseAdmin.java | 62 ++++- .../shaded/protobuf/RequestConverter.java | 13 + .../org/apache/hadoop/hbase/util/Strings.java | 6 + .../main/protobuf/server/master/Master.proto | 14 ++ .../server/master/MasterProcedure.proto | 20 ++ .../hadoop/hbase/executor/EventType.java | 8 +- .../hadoop/hbase/executor/ExecutorType.java | 4 +- .../apache/hadoop/hbase/master/HMaster.java | 31 +++ .../hbase/master/MasterRpcServices.java | 19 ++ .../hadoop/hbase/master/MasterServices.java | 11 + .../procedure/FlushRegionProcedure.java | 238 ++++++++++++++++++ .../master/procedure/FlushTableProcedure.java | 199 +++++++++++++++ .../procedure/TableProcedureInterface.java | 1 + .../hbase/master/procedure/TableQueue.java | 1 + .../flush/FlushTableSubprocedure.java | 21 +- .../MasterFlushTableProcedureManager.java | 10 +- ...egionServerFlushTableProcedureManager.java | 18 +- .../regionserver/FlushRegionCallable.java | 83 ++++++ .../hbase/regionserver/HRegionServer.java | 4 + .../hbase/master/MockNoopMasterServices.java | 6 + .../procedure/TestFlushTableProcedure.java | 69 +++++ .../TestFlushTableProcedureBase.java | 97 +++++++ ...TestFlushTableProcedureMasterRestarts.java | 76 ++++++ ...edureWithDoNotSupportFlushTableMaster.java | 82 ++++++ .../TestFlushWithThroughputController.java | 10 +- .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 4 + .../hbase/thrift2/client/ThriftAdmin.java | 5 + 31 files changed, 1111 insertions(+), 28 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushTableProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureBase.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureMasterRestarts.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureWithDoNotSupportFlushTableMaster.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 547c6f4024a7..4d579c16af26 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -550,6 +550,15 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName 
tableName, byte[] */ void flush(TableName tableName, byte[] columnFamily) throws IOException; + /** + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush + * @param columnFamilies column families within a table + * @throws IOException if a remote or network exception occurs + */ + void flush(TableName tableName, List columnFamilies) throws IOException; + /** * Flush an individual region. Synchronous operation. * @param regionName region to flush diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 48ce3dd9b016..690b6406fd3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -264,6 +264,11 @@ public void flush(TableName tableName, byte[] columnFamily) throws IOException { get(admin.flush(tableName, columnFamily)); } + @Override + public void flush(TableName tableName, List columnFamilies) throws IOException { + get(admin.flush(tableName, columnFamilies)); + } + @Override public void flushRegion(byte[] regionName) throws IOException { get(admin.flushRegion(regionName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 473773e65cec..960982f5e3f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -347,6 +347,14 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, */ CompletableFuture flush(TableName tableName, byte[] columnFamily); + /** + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. + * @param tableName table to flush + * @param columnFamilies column families within a table + */ + CompletableFuture flush(TableName tableName, List columnFamilies); + /** * Flush an individual region. 
* @param regionName region to flush diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 0fe99afbba8f..5ee8a6ab8269 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -269,6 +269,11 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { return wrap(rawAdmin.flush(tableName, columnFamily)); } + @Override + public CompletableFuture flush(TableName tableName, List columnFamilies) { + return wrap(rawAdmin.flush(tableName, columnFamilies)); + } + @Override public CompletableFuture flushRegion(byte[] regionName) { return wrap(rawAdmin.flushRegion(regionName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 9b3baec87c7e..ee1dfac16bd3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterMetricsBuilder; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -96,6 +97,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -180,6 +182,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; @@ -950,12 +954,50 @@ public CompletableFuture> getRegions(TableName tableName) { @Override public CompletableFuture flush(TableName tableName) { - return flush(tableName, null); + return flush(tableName, Collections.emptyList()); } @Override public CompletableFuture flush(TableName tableName, byte[] columnFamily) { + return flush(tableName, Collections.singletonList(columnFamily)); + } + + @Override + public CompletableFuture flush(TableName tableName, List columnFamilyList) { + // This is for keeping compatibility with old implementation. + // If the server version is lower than the client version, it's possible that the + // flushTable method is not present in the server side, if so, we need to fall back + // to the old implementation. 
+ List columnFamilies = columnFamilyList.stream() + .filter(cf -> cf != null && cf.length > 0).distinct().collect(Collectors.toList()); + FlushTableRequest request = RequestConverter.buildFlushTableRequest(tableName, columnFamilies, + ng.getNonceGroup(), ng.newNonce()); + CompletableFuture procFuture = this. procedureCall( + tableName, request, (s, c, req, done) -> s.flushTable(c, req, done), + (resp) -> resp.getProcId(), new FlushTableProcedureBiConsumer(tableName)); CompletableFuture future = new CompletableFuture<>(); + addListener(procFuture, (ret, error) -> { + if (error != null) { + if (error instanceof TableNotFoundException || error instanceof TableNotEnabledException) { + future.completeExceptionally(error); + } else if (error instanceof DoNotRetryIOException) { + // usually this is caused by the method is not present on the server or + // the hbase hadoop version does not match the running hadoop version. + // if that happens, we need fall back to the old flush implementation. + LOG.info("Unrecoverable error in master side. Fallback to FlushTableProcedure V1", error); + legacyFlush(future, tableName, columnFamilies); + } else { + future.completeExceptionally(error); + } + } else { + future.complete(ret); + } + }); + return future; + } + + private void legacyFlush(CompletableFuture future, TableName tableName, + List columnFamilies) { addListener(tableExists(tableName), (exists, err) -> { if (err != null) { future.completeExceptionally(err); @@ -969,8 +1011,9 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { future.completeExceptionally(new TableNotEnabledException(tableName)); } else { Map props = new HashMap<>(); - if (columnFamily != null) { - props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily)); + if (columnFamilies != null && !columnFamilies.isEmpty()) { + props.put(HConstants.FAMILY_KEY_STR, Strings.JOINER + .join(columnFamilies.stream().map(Bytes::toString).collect(Collectors.toList()))); } addListener( execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props), @@ -985,7 +1028,6 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { }); } }); - return future; } @Override @@ -2768,6 +2810,18 @@ String getOperationType() { } } + private static class FlushTableProcedureBiConsumer extends TableProcedureBiConsumer { + + FlushTableProcedureBiConsumer(TableName tableName) { + super(tableName); + } + + @Override + String getOperationType() { + return "FLUSH"; + } + } + private static class CreateNamespaceProcedureBiConsumer extends NamespaceProcedureBiConsumer { CreateNamespaceProcedureBiConsumer(String namespaceName) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 9c88b61fd678..33884158da48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; @@ -1714,4 +1715,16 @@ public static RemoveServersRequest buildRemoveServersRequest(Set
<Address>
server } return RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); } + + public static FlushTableRequest buildFlushTableRequest(final TableName tableName, + final List columnFamilies, final long nonceGroup, final long nonce) { + FlushTableRequest.Builder builder = FlushTableRequest.newBuilder(); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); + if (!columnFamilies.isEmpty()) { + for (byte[] columnFamily : columnFamilies) { + builder.addColumnFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); + } + } + return builder.setNonceGroup(nonceGroup).setNonce(nonce).build(); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java index cdf5bf63fb59..3baab9cca211 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java @@ -20,6 +20,9 @@ import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Joiner; +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; + /** * Utility for Strings. */ @@ -28,6 +31,9 @@ public final class Strings { public static final String DEFAULT_SEPARATOR = "="; public static final String DEFAULT_KEYVALUE_SEPARATOR = ", "; + public static final Joiner JOINER = Joiner.on(","); + public static final Splitter SPLITTER = Splitter.on(","); + private Strings() { } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 6f512a921523..5d715fdcdd16 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -200,6 +200,17 @@ message ModifyTableResponse { optional uint64 proc_id = 1; } +message FlushTableRequest { + required TableName table_name = 1; + repeated bytes column_family = 2; + optional uint64 nonce_group = 3 [default = 0]; + optional uint64 nonce = 4 [default = 0]; +} + +message FlushTableResponse { + optional uint64 proc_id = 1; +} + /* Namespace-level protobufs */ message CreateNamespaceRequest { @@ -1239,6 +1250,9 @@ service MasterService { rpc FlushMasterStore(FlushMasterStoreRequest) returns(FlushMasterStoreResponse); + + rpc FlushTable(FlushTableRequest) + returns(FlushTableResponse); } // HBCK Service definitions. 
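For context, a minimal client-side usage sketch of the flush API that the new FlushTableRequest message and FlushTable RPC above back (not part of the patch; the table name "t1", the families "cf1"/"cf2", and the classpath hbase-site.xml are illustrative assumptions):

import java.util.Arrays;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes cluster configuration (hbase-site.xml) is on the classpath.
    try (Connection conn = ConnectionFactory.createConnection();
      Admin admin = conn.getAdmin()) {
      // Flush only the given column families on all regions of the table.
      // Against a master that lacks the FlushTable RPC, the client falls back
      // to the older flush-table-proc coordinated flush, as described above.
      admin.flush(TableName.valueOf("t1"),
        Arrays.asList(Bytes.toBytes("cf1"), Bytes.toBytes("cf2")));
    }
  }
}
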
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 901abf6bd0c5..3f3ecd63b002 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -191,6 +191,26 @@ message RestoreParentToChildRegionsPair { required string child2_region_name = 3; } +enum FlushTableState { + FLUSH_TABLE_PREPARE = 1; + FLUSH_TABLE_FLUSH_REGIONS = 2; +} + +message FlushTableProcedureStateData { + required TableName table_name = 1; + repeated bytes column_family = 2; +} + +message FlushRegionProcedureStateData { + required RegionInfo region = 1; + repeated bytes column_family = 2; +} + +message FlushRegionParameter { + required RegionInfo region = 1; + repeated bytes column_family = 2; +} + enum SnapshotState { SNAPSHOT_PREPARE = 1; SNAPSHOT_PRE_OPERATION = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index e79c9c2bc415..07f8339a20db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -291,7 +291,13 @@ public enum EventType { * RS verify snapshot.
* RS_VERIFY_SNAPSHOT */ - RS_VERIFY_SNAPSHOT(88, ExecutorType.RS_SNAPSHOT_OPERATIONS); + RS_VERIFY_SNAPSHOT(88, ExecutorType.RS_SNAPSHOT_OPERATIONS), + + /** + * RS flush regions.
+ * RS_FLUSH_OPERATIONS + */ + RS_FLUSH_REGIONS(89, ExecutorType.RS_FLUSH_OPERATIONS); private final int code; private final ExecutorType executor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 1af70b2c44b1..b16a2dd4f95d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -54,7 +54,9 @@ public enum ExecutorType { RS_SWITCH_RPC_THROTTLE(33), RS_IN_MEMORY_COMPACTION(34), RS_CLAIM_REPLICATION_QUEUE(35), - RS_SNAPSHOT_OPERATIONS(36); + RS_SNAPSHOT_OPERATIONS(36), + + RS_FLUSH_OPERATIONS(37); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 1b5291491503..995bff17724e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -157,6 +157,7 @@ import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure; import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure; import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure; +import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure; import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; @@ -4381,4 +4382,34 @@ private void initializeCoprocessorHost(Configuration conf) { // initialize master side coprocessors before we start handling requests this.cpHost = new MasterCoprocessorHost(this, conf); } + + @Override + public long flushTable(TableName tableName, List columnFamilies, long nonceGroup, + long nonce) throws IOException { + checkInitialized(); + + if ( + !getConfiguration().getBoolean(MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED, + MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED_DEFAULT) + ) { + throw new DoNotRetryIOException("FlushTableProcedureV2 is DISABLED"); + } + + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preTableFlush(tableName); + LOG.info(getClientIdAuditPrefix() + " flush " + tableName); + submitProcedure( + new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies)); + getMaster().getMasterCoprocessorHost().postTableFlush(tableName); + } + + @Override + protected String getDescription() { + return "FlushTableProcedure"; + } + }); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 2e416f5e1a07..b6a17d8503b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -235,6 +235,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; @@ -3590,4 +3592,21 @@ public FlushMasterStoreResponse flushMasterStore(RpcController controller, } return FlushMasterStoreResponse.newBuilder().build(); } + + @Override + public FlushTableResponse flushTable(RpcController controller, FlushTableRequest req) + throws ServiceException { + TableName tableName = ProtobufUtil.toTableName(req.getTableName()); + List columnFamilies = req.getColumnFamilyCount() > 0 + ? req.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()).map(ByteString::toByteArray) + .collect(Collectors.toList()) + : null; + try { + long procId = + server.flushTable(tableName, columnFamilies, req.getNonceGroup(), req.getNonce()); + return FlushTableResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 95166240c789..933bf0d18150 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -477,4 +477,15 @@ boolean normalizeRegions(final NormalizeTableFilterParams ntfp, final boolean is * Flush master local region */ void flushMasterStore() throws IOException; + + /** + * Flush an existing table + * @param tableName The table name + * @param columnFamilies The column families to flush + * @param nonceGroup the nonce group + * @param nonce the nonce + * @return the flush procedure id + */ + long flushTable(final TableName tableName, final List columnFamilies, + final long nonceGroup, final long nonce) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java new file mode 100644 index 000000000000..67f0442b618a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; +import org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.master.assignment.ServerState; +import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureException; +import org.apache.hadoop.hbase.regionserver.FlushRegionCallable; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.FlushRegionParameter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.FlushRegionProcedureStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; + +@InterfaceAudience.Private +public class FlushRegionProcedure extends Procedure + implements TableProcedureInterface, RemoteProcedure { + private static final Logger LOG = LoggerFactory.getLogger(FlushRegionProcedure.class); + + private RegionInfo region; + private List columnFamilies; + private ProcedureEvent event; + private boolean dispatched; + private boolean succ; + private RetryCounter retryCounter; + + public FlushRegionProcedure() { + } + + public FlushRegionProcedure(RegionInfo region) { + this(region, null); + } + + public FlushRegionProcedure(RegionInfo region, List columnFamilies) { + this.region = region; + this.columnFamilies = columnFamilies; + } + + @Override + protected Procedure[] execute(MasterProcedureEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + if (dispatched) { + if (succ) { + return null; + } + dispatched = false; + } + + RegionStates regionStates = env.getAssignmentManager().getRegionStates(); + RegionStateNode regionNode = regionStates.getRegionStateNode(region); + regionNode.lock(); + try { + if (!regionNode.isInState(State.OPEN) || regionNode.isInTransition()) { + LOG.info("State of region {} is not OPEN or in transition. 
Skip {} ...", region, this); + return null; + } + ServerName targetServer = regionNode.getRegionLocation(); + if (targetServer == null) { + setTimeoutForSuspend(env, + String.format("target server of region %s is null", region.getRegionNameAsString())); + throw new ProcedureSuspendedException(); + } + ServerState serverState = regionStates.getServerNode(targetServer).getState(); + if (serverState != ServerState.ONLINE) { + setTimeoutForSuspend(env, String.format("target server of region %s %s is in state %s", + region.getRegionNameAsString(), targetServer, serverState)); + throw new ProcedureSuspendedException(); + } + try { + env.getRemoteDispatcher().addOperationToNode(targetServer, this); + dispatched = true; + event = new ProcedureEvent<>(this); + event.suspendIfNotReady(this); + throw new ProcedureSuspendedException(); + } catch (FailedRemoteDispatchException e) { + setTimeoutForSuspend(env, "Failed send request to " + targetServer); + throw new ProcedureSuspendedException(); + } + } finally { + regionNode.unlock(); + } + } + + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + + @Override + protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return false; + } + + @Override + public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOException e) { + complete(env, e); + } + + @Override + public void remoteOperationCompleted(MasterProcedureEnv env) { + complete(env, null); + } + + @Override + public void remoteOperationFailed(MasterProcedureEnv env, RemoteProcedureException error) { + complete(env, error); + } + + private void complete(MasterProcedureEnv env, Throwable error) { + if (isFinished()) { + LOG.info("This procedure {} is already finished, skip the rest processes", this.getProcId()); + return; + } + if (event == null) { + LOG.warn("procedure event for {} is null, maybe the procedure is created when recovery", + getProcId()); + return; + } + if (error == null) { + succ = true; + } + event.wake(env.getProcedureScheduler()); + event = null; + } + + private void setTimeoutForSuspend(MasterProcedureEnv env, String reason) { + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("{} can not run currently because {}, wait {} ms to retry", this, reason, backoff); + setTimeout(Math.toIntExact(backoff)); + setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); + skipPersistence(); + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + FlushRegionProcedureStateData.Builder builder = FlushRegionProcedureStateData.newBuilder(); + builder.setRegion(ProtobufUtil.toRegionInfo(region)); + if (columnFamilies != null) { + for (byte[] columnFamily : columnFamilies) { + if (columnFamily != null && columnFamily.length > 0) { + builder.addColumnFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); + } + } + } + serializer.serialize(builder.build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + FlushRegionProcedureStateData data = + serializer.deserialize(FlushRegionProcedureStateData.class); + this.region 
= ProtobufUtil.toRegionInfo(data.getRegion()); + if (data.getColumnFamilyCount() > 0) { + this.columnFamilies = data.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()) + .map(ByteString::toByteArray).collect(Collectors.toList()); + } + } + + @Override + public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { + FlushRegionParameter.Builder builder = FlushRegionParameter.newBuilder(); + builder.setRegion(ProtobufUtil.toRegionInfo(region)); + if (columnFamilies != null) { + for (byte[] columnFamily : columnFamilies) { + if (columnFamily != null && columnFamily.length > 0) { + builder.addColumnFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); + } + } + } + return Optional.of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), + FlushRegionCallable.class, builder.build().toByteArray())); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.FLUSH; + } + + @Override + protected boolean waitInitialized(MasterProcedureEnv env) { + return env.waitInitialized(this); + } + + @Override + public TableName getTableName() { + return region.getTable(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushTableProcedure.java new file mode 100644 index 000000000000..892d4d13b5ee --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushTableProcedure.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.FlushTableProcedureStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.FlushTableState; + +@InterfaceAudience.Private +public class FlushTableProcedure extends AbstractStateMachineTableProcedure { + private static final Logger LOG = LoggerFactory.getLogger(FlushTableProcedure.class); + + private TableName tableName; + + private List columnFamilies; + + public FlushTableProcedure() { + super(); + } + + public FlushTableProcedure(MasterProcedureEnv env, TableName tableName) { + this(env, tableName, null); + } + + public FlushTableProcedure(MasterProcedureEnv env, TableName tableName, + List columnFamilies) { + super(env); + this.tableName = tableName; + this.columnFamilies = columnFamilies; + } + + @Override + protected LockState acquireLock(MasterProcedureEnv env) { + // Here we don't acquire table lock because the flush operation and other operations (like + // split or merge) are not mutually exclusive. Region will flush memstore when being closed. + // It's safe even if we don't have lock. However, currently we are limited by the scheduling + // mechanism of the procedure scheduler and have to acquire table shared lock here. See + // HBASE-27905 for details. 
+ if (env.getProcedureScheduler().waitTableSharedLock(this, getTableName())) { + return LockState.LOCK_EVENT_WAIT; + } + return LockState.LOCK_ACQUIRED; + } + + @Override + protected void releaseLock(MasterProcedureEnv env) { + env.getProcedureScheduler().wakeTableSharedLock(this, getTableName()); + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, FlushTableState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + try { + switch (state) { + case FLUSH_TABLE_PREPARE: + preflightChecks(env, true); + setNextState(FlushTableState.FLUSH_TABLE_FLUSH_REGIONS); + return Flow.HAS_MORE_STATE; + case FLUSH_TABLE_FLUSH_REGIONS: + addChildProcedure(createFlushRegionProcedures(env)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (Exception e) { + if (e instanceof DoNotRetryIOException) { + // for example, TableNotFoundException or TableNotEnabledException + setFailure("master-flush-table", e); + LOG.warn("Unrecoverable error trying to flush " + getTableName() + " state=" + state, e); + } else { + LOG.warn("Retriable error trying to flush " + getTableName() + " state=" + state, e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(MasterProcedureEnv env, FlushTableState state) + throws IOException, InterruptedException { + // nothing to rollback + } + + @Override + protected FlushTableState getState(int stateId) { + return FlushTableState.forNumber(stateId); + } + + @Override + protected int getStateId(FlushTableState state) { + return state.getNumber(); + } + + @Override + protected FlushTableState getInitialState() { + return FlushTableState.FLUSH_TABLE_PREPARE; + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.FLUSH; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); + FlushTableProcedureStateData.Builder builder = FlushTableProcedureStateData.newBuilder(); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); + if (columnFamilies != null) { + for (byte[] columnFamily : columnFamilies) { + if (columnFamily != null && columnFamily.length > 0) { + builder.addColumnFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); + } + } + } + serializer.serialize(builder.build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); + FlushTableProcedureStateData data = serializer.deserialize(FlushTableProcedureStateData.class); + this.tableName = ProtobufUtil.toTableName(data.getTableName()); + if (data.getColumnFamilyCount() > 0) { + this.columnFamilies = data.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()) + .map(ByteString::toByteArray).collect(Collectors.toList()); + } + } + + private FlushRegionProcedure[] createFlushRegionProcedures(MasterProcedureEnv env) { + return env.getAssignmentManager().getTableRegions(getTableName(), true).stream() + .filter(r -> RegionReplicaUtil.isDefaultReplica(r)) + .map(r -> new FlushRegionProcedure(r, columnFamilies)).toArray(FlushRegionProcedure[]::new); + } + + @Override + public void toStringClassDetails(StringBuilder builder) { + builder.append(getClass().getName()).append(", 
id=").append(getProcId()).append(", table=") + .append(tableName); + if (columnFamilies != null) { + builder.append(", columnFamilies=[") + .append(Strings.JOINER + .join(columnFamilies.stream().map(Bytes::toString).collect(Collectors.toList()))) + .append("]"); + } + } + + @Override + protected void afterReplay(MasterProcedureEnv env) { + if ( + !env.getMasterConfiguration().getBoolean( + MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED, + MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED_DEFAULT) + ) { + setFailure("master-flush-table", new HBaseIOException("FlushTableProcedureV2 is DISABLED")); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index c689e52302f3..1ca5b17ac21f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -42,6 +42,7 @@ public enum TableOperationType { READ, SNAPSHOT, REGION_SNAPSHOT, + FLUSH, REGION_EDIT, REGION_SPLIT, REGION_MERGE, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index 1a9847edcc89..d1acd08ea21c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -56,6 +56,7 @@ private static boolean requireTableExclusiveLock(TableProcedureInterface proc) { // we allow concurrent edit on the ns family in meta table return !proc.getTableName().equals(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME); case READ: + case FLUSH: case SNAPSHOT: return false; // region operations are using the shared-lock on the table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java index b521a85e7a0c..88ddf6102a55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.procedure.flush; -import java.util.Collections; import java.util.List; import java.util.concurrent.Callable; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.procedure.ProcedureMember; @@ -41,16 +41,16 @@ public class FlushTableSubprocedure extends Subprocedure { private static final Logger LOG = LoggerFactory.getLogger(FlushTableSubprocedure.class); private final String table; - private final String family; + private final List families; private final List regions; private final FlushTableSubprocedurePool taskManager; public FlushTableSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, - long wakeFrequency, long timeout, List regions, String table, String family, + long wakeFrequency, long timeout, List regions, String table, List families, FlushTableSubprocedurePool taskManager) { super(member, table, errorListener, wakeFrequency, timeout); this.table = table; - this.family 
= family; + this.families = families; this.regions = regions; this.taskManager = taskManager; } @@ -70,7 +70,7 @@ public Void call() throws Exception { region.startRegionOperation(); try { LOG.debug("Flush region " + region.toString() + " started..."); - if (families == null) { + if (families == null || families.isEmpty()) { region.flush(true); } else { region.flushcache(families, false, FlushLifeCycleTracker.DUMMY); @@ -97,15 +97,16 @@ private void flushRegions() throws ForeignException { throw new IllegalStateException( "Attempting to flush " + table + " but we currently have outstanding tasks"); } - List families = null; - if (family != null) { - LOG.debug("About to flush family {} on all regions for table {}", family, table); - families = Collections.singletonList(Bytes.toBytes(family)); + + List familiesToFlush = null; + if (families != null && !families.isEmpty()) { + LOG.debug("About to flush family {} on all regions for table {}", families, table); + familiesToFlush = families.stream().map(Bytes::toBytes).collect(Collectors.toList()); } // Add all hfiles already existing in region. for (HRegion region : regions) { // submit one task per region for parallelize by region. - taskManager.submitTask(new RegionFlushTask(region, families)); + taskManager.submitTask(new RegionFlushTask(region, familiesToFlush)); monitor.rethrowException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java index 15d3d8a73a90..529d71ba728e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java @@ -58,6 +58,10 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; + public static final String FLUSH_PROCEDURE_ENABLED = "hbase.flush.procedure.enabled"; + + public static final boolean FLUSH_PROCEDURE_ENABLED_DEFAULT = true; + private static final String FLUSH_TIMEOUT_MILLIS_KEY = "hbase.flush.master.timeoutMillis"; private static final int FLUSH_TIMEOUT_MILLIS_DEFAULT = 60000; private static final String FLUSH_WAKE_MILLIS_KEY = "hbase.flush.master.wakeMillis"; @@ -142,13 +146,13 @@ public void execProcedure(ProcedureDescription desc) throws IOException { ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance()); - HBaseProtos.NameStringPair family = null; + HBaseProtos.NameStringPair families = null; for (HBaseProtos.NameStringPair nsp : desc.getConfigurationList()) { if (HConstants.FAMILY_KEY_STR.equals(nsp.getName())) { - family = nsp; + families = nsp; } } - byte[] procArgs = family != null ? family.toByteArray() : new byte[0]; + byte[] procArgs = families != null ? families.toByteArray() : new byte[0]; // Kick of the global procedure from the master coordinator to the region servers. 
// We rely on the existing Distributed Procedure framework to prevent any concurrent diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 3322f7a5cd5d..2cee89b57498 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -50,6 +51,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -129,7 +131,7 @@ public void stop(boolean force) throws IOException { * of a race where regions may be missed. * @return Subprocedure to submit to the ProcedureMember. */ - public Subprocedure buildSubprocedure(String table, String family) { + public Subprocedure buildSubprocedure(String table, List families) { // don't run the subprocedure if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { @@ -159,7 +161,7 @@ public Subprocedure buildSubprocedure(String table, String family) { FlushTableSubprocedurePool taskManager = new FlushTableSubprocedurePool(rss.getServerName().toString(), conf, rss); return new FlushTableSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, - involvedRegions, table, family, taskManager); + involvedRegions, table, families, taskManager); } /** @@ -175,19 +177,19 @@ public class FlushTableSubprocedureBuilder implements SubprocedureFactory { @Override public Subprocedure buildSubprocedure(String name, byte[] data) { - String family = null; - // Currently we do not put other data except family, so it is ok to - // judge by length that if family was specified + List families = null; + // Currently we do not put other data except families, so it is ok to + // judge by length that if families were specified if (data.length > 0) { try { HBaseProtos.NameStringPair nsp = HBaseProtos.NameStringPair.parseFrom(data); - family = nsp.getValue(); + families = ImmutableList.copyOf(Strings.SPLITTER.split(nsp.getValue())); } catch (Exception e) { - LOG.error("fail to get family by parsing from data", e); + LOG.error("fail to get families by parsing from data", e); } } // The name of the procedure instance from the master is the table name. 
- return RegionServerFlushTableProcedureManager.this.buildSubprocedure(name, family); + return RegionServerFlushTableProcedureManager.this.buildSubprocedure(name, families); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java new file mode 100644 index 000000000000..3dd932a1736d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.FlushRegionParameter; + +@InterfaceAudience.Private +public class FlushRegionCallable extends BaseRSProcedureCallable { + + private static final Logger LOG = LoggerFactory.getLogger(FlushRegionCallable.class); + + private RegionInfo regionInfo; + + private List columnFamilies; + + @Override + protected void doCall() throws Exception { + HRegion region = rs.getRegion(regionInfo.getEncodedName()); + if (region == null) { + throw new NotServingRegionException("region=" + regionInfo.getRegionNameAsString()); + } + LOG.debug("Starting region operation on {}", region); + region.startRegionOperation(); + try { + HRegion.FlushResult res; + if (columnFamilies == null) { + res = region.flush(true); + } else { + res = region.flushcache(columnFamilies, false, FlushLifeCycleTracker.DUMMY); + } + if (res.getResult() == HRegion.FlushResult.Result.CANNOT_FLUSH) { + throw new IOException("Unable to complete flush " + regionInfo); + } + } finally { + LOG.debug("Closing region operation on {}", region); + region.closeRegionOperation(); + } + } + + @Override + protected void initParameter(byte[] parameter) throws Exception { + FlushRegionParameter param = FlushRegionParameter.parseFrom(parameter); + this.regionInfo = ProtobufUtil.toRegionInfo(param.getRegion()); + if (param.getColumnFamilyCount() > 0) { + this.columnFamilies = param.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()) + .map(ByteString::toByteArray).collect(Collectors.toList()); + } + } + + @Override + public 
EventType getEventType() { + return EventType.RS_FLUSH_REGIONS; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f9f841181064..07d2ac332c5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1906,6 +1906,10 @@ executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLAIM_REPLI executorService.startExecutorService( executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OPERATIONS) .setCorePoolSize(rsSnapshotOperationThreads)); + final int rsFlushOperationThreads = + conf.getInt("hbase.regionserver.executor.flush.operations.threads", 3); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.RS_FLUSH_OPERATIONS).setCorePoolSize(rsFlushOperationThreads)); Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller", uncaughtExceptionHandler); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index c82220a8b22a..a19b6ffbec64 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -536,4 +536,10 @@ public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() { public Semaphore getSyncReplicationPeerLock() { return null; } + + @Override + public long flushTable(TableName tableName, List columnFamilies, long nonceGroup, + long nonce) throws IOException { + return 0; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedure.java new file mode 100644 index 000000000000..cd48370647df --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedure.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.Arrays; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.Assert; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestFlushTableProcedure extends TestFlushTableProcedureBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestFlushTableProcedure.class); + + @Test + public void testSimpleFlush() throws IOException { + assertTableMemStoreNotEmpty(); + TEST_UTIL.getAdmin().flush(TABLE_NAME); + assertTableMemStoreEmpty(); + } + + @Test + public void testFlushTableExceptionally() throws IOException { + Admin admin = TEST_UTIL.getAdmin(); + admin.disableTable(TABLE_NAME); + Assert.assertThrows(TableNotEnabledException.class, () -> admin.flush(TABLE_NAME)); + admin.deleteTable(TABLE_NAME); + Assert.assertThrows(TableNotFoundException.class, () -> admin.flush(TABLE_NAME)); + } + + @Test + public void testSingleColumnFamilyFlush() throws IOException { + assertTableMemStoreNotEmpty(); + TEST_UTIL.getAdmin().flush(TABLE_NAME, Arrays.asList(FAMILY1, FAMILY2, FAMILY3)); + assertTableMemStoreEmpty(); + } + + @Test + public void testMultiColumnFamilyFlush() throws IOException { + assertTableMemStoreNotEmpty(); + TEST_UTIL.getAdmin().flush(TABLE_NAME, Arrays.asList(FAMILY1, FAMILY2, FAMILY3)); + assertTableMemStoreEmpty(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureBase.java new file mode 100644 index 000000000000..9e3eee3c7044 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureBase.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
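The tests above exercise the new Admin#flush overload that accepts a list of column families. A typical client-side call, assuming the List<byte[]> signature exercised by those tests and using placeholder table and family names, could look like:

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSelectedFamiliesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
          // Flush only cf1 and cf2; the table's other families keep their memstores.
          admin.flush(TableName.valueOf("TestFlushTable"),
            Arrays.asList(Bytes.toBytes("cf1"), Bytes.toBytes("cf2")));
        }
      }
    }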
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.RegionSplitter; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +public class TestFlushTableProcedureBase { + + protected static HBaseTestingUtil TEST_UTIL; + + protected TableName TABLE_NAME; + protected byte[] FAMILY1; + protected byte[] FAMILY2; + protected byte[] FAMILY3; + + @Before + public void setup() throws Exception { + TEST_UTIL = new HBaseTestingUtil(); + addConfiguration(TEST_UTIL.getConfiguration()); + TEST_UTIL.startMiniCluster(3); + TABLE_NAME = TableName.valueOf(Bytes.toBytes("TestFlushTable")); + FAMILY1 = Bytes.toBytes("cf1"); + FAMILY2 = Bytes.toBytes("cf2"); + FAMILY3 = Bytes.toBytes("cf3"); + final byte[][] splitKeys = new RegionSplitter.HexStringSplit().split(10); + Table table = + TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY1, FAMILY2, FAMILY3 }, splitKeys); + TEST_UTIL.loadTable(table, FAMILY1, false); + TEST_UTIL.loadTable(table, FAMILY2, false); + TEST_UTIL.loadTable(table, FAMILY3, false); + } + + protected void addConfiguration(Configuration config) { + // delay dispatch so that we can do something, for example kill a target server + config.setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 10000); + config.setInt(RemoteProcedureDispatcher.DISPATCH_MAX_QUEUE_SIZE_CONF_KEY, 128); + } + + protected void assertTableMemStoreNotEmpty() { + long totalSize = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream() + .mapToLong(HRegion::getMemStoreDataSize).sum(); + Assert.assertTrue(totalSize > 0); + } + + protected void assertTableMemStoreEmpty() { + long totalSize = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream() + .mapToLong(HRegion::getMemStoreDataSize).sum(); + Assert.assertEquals(0, totalSize); + } + + protected void assertColumnFamilyMemStoreNotEmpty(byte[] columnFamily) { + long totalSize = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream() + .mapToLong(r -> r.getStore(columnFamily).getMemStoreSize().getDataSize()).sum(); + Assert.assertTrue(totalSize > 0); + } + + protected void assertColumnFamilyMemStoreEmpty(byte[] columnFamily) { + long totalSize = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream() + .mapToLong(r -> r.getStore(columnFamily).getMemStoreSize().getDataSize()).sum(); + Assert.assertEquals(0, totalSize); + } + + @After + public void teardown() throws Exception { + if (TEST_UTIL.getHBaseCluster().getMaster() != null) { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( + TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(), false); + } + TEST_UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureMasterRestarts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureMasterRestarts.java new file mode 100644 index 000000000000..c0c038982a9a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureMasterRestarts.java @@ -0,0 +1,76 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestFlushTableProcedureMasterRestarts extends TestFlushTableProcedureBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestFlushTableProcedureMasterRestarts.class); + + @Test + public void testMasterRestarts() throws IOException { + assertTableMemStoreNotEmpty(); + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + ProcedureExecutor procExec = master.getMasterProcedureExecutor(); + MasterProcedureEnv env = procExec.getEnvironment(); + FlushTableProcedure proc = new FlushTableProcedure(env, TABLE_NAME); + long procId = procExec.submitProcedure(proc); + TEST_UTIL.waitFor(5000, 1000, () -> proc.getState().getNumber() > 1); + + TEST_UTIL.getHBaseCluster().killMaster(master.getServerName()); + TEST_UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(), 30000); + TEST_UTIL.getHBaseCluster().startMaster(); + TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); + + master = TEST_UTIL.getHBaseCluster().getMaster(); + procExec = master.getMasterProcedureExecutor(); + ProcedureTestingUtility.waitProcedure(procExec, procId); + assertTableMemStoreEmpty(); + } + + @Test + public void testSkipRIT() throws IOException { + HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).get(0); + + TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() + .getRegionStateNode(region.getRegionInfo()) + .setState(RegionState.State.CLOSING, RegionState.State.OPEN); + + FlushRegionProcedure proc = new FlushRegionProcedure(region.getRegionInfo()); + TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().submitProcedure(proc); + + // wait for a time which is shorter than RSProcedureDispatcher delays + TEST_UTIL.waitFor(5000, () -> proc.isFinished()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureWithDoNotSupportFlushTableMaster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureWithDoNotSupportFlushTableMaster.java new file mode 100644 index 000000000000..66ccf362fef7 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFlushTableProcedureWithDoNotSupportFlushTableMaster.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestFlushTableProcedureWithDoNotSupportFlushTableMaster + extends TestFlushTableProcedureBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestFlushTableProcedureWithDoNotSupportFlushTableMaster.class); + + @Override + protected void addConfiguration(Configuration config) { + super.addConfiguration(config); + config.set(HConstants.MASTER_IMPL, DoNotSupportFlushTableMaster.class.getName()); + } + + @Test + public void testFlushFallback() throws IOException { + assertTableMemStoreNotEmpty(); + TEST_UTIL.getAdmin().flush(TABLE_NAME); + assertTableMemStoreEmpty(); + } + + @Test + public void testSingleColumnFamilyFlushFallback() throws IOException { + assertColumnFamilyMemStoreNotEmpty(FAMILY1); + TEST_UTIL.getAdmin().flush(TABLE_NAME, FAMILY1); + assertColumnFamilyMemStoreEmpty(FAMILY1); + } + + @Test + public void testMultiColumnFamilyFlushFallback() throws IOException { + assertTableMemStoreNotEmpty(); + TEST_UTIL.getAdmin().flush(TABLE_NAME, Arrays.asList(FAMILY1, FAMILY2, FAMILY3)); + assertTableMemStoreEmpty(); + } + + public static final class DoNotSupportFlushTableMaster extends HMaster { + + public DoNotSupportFlushTableMaster(Configuration conf) throws IOException { + super(conf); + } + + @Override + public long flushTable(TableName tableName, List columnFamilies, long nonceGroup, + long nonce) throws IOException { + throw new DoNotRetryIOException("UnsupportedOperation: flushTable"); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java index dc84642741f9..c1d9e2788d47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.util.List; @@ -124,7 +125,14 @@ private Pair generateAndFlushData(Table table) throws IOException table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value)); } long startTime = System.nanoTime(); - hbtu.getAdmin().flush(tableName); + hbtu.getHBaseCluster().getRegions(tableName).stream().findFirst().ifPresent(r -> { + try { + r.flush(true); + } catch (IOException e) { + LOG.error("Failed flush region {}", r, e); + fail("Failed flush region " + r.getRegionInfo().getRegionNameAsString()); + } + }); duration += System.nanoTime() - startTime; } HStore store = getStoreWithName(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 1c94affd1dd1..9b1d8524d003 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -237,6 +237,10 @@ public void flush(TableName tableName, byte[] columnFamily) throws IOException { admin.flush(tableName, columnFamily); } + public void flush(TableName tableName, List columnFamilies) throws IOException { + admin.flush(tableName, columnFamilies); + } + public void flushRegion(byte[] regionName) throws IOException { admin.flushRegion(regionName); } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 1b3c29ebe665..1b7b6938524a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -490,6 +490,11 @@ public void flush(TableName tableName, byte[] columnFamily) { throw new NotImplementedException("flush not supported in ThriftAdmin"); } + @Override + public void flush(TableName tableName, List columnFamilies) { + throw new NotImplementedException("flush not supported in ThriftAdmin"); + } + @Override public void flushRegion(byte[] regionName) { throw new NotImplementedException("flushRegion not supported in ThriftAdmin"); From 2fb2ae152a2d8aedda154bb1cf0bc72bf6e6b09c Mon Sep 17 00:00:00 2001 From: jbewing Date: Tue, 15 Aug 2023 10:28:13 -0400 Subject: [PATCH 052/514] HBASE-28012 Avoid CellUtil.cloneRow in BufferedEncodedSeeker (#5347) Signed-off-by: Duo Zhang --- .../io/encoding/BufferedDataBlockEncoder.java | 157 +++++++++++++++--- .../hadoop/hbase/util/ByteBufferUtils.java | 24 +++ .../io/encoding/TestDataBlockEncoders.java | 65 ++++++++ 3 files changed, 223 insertions(+), 23 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index a88d9fbdc16c..f998f40d68b9 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -87,21 +87,100 @@ public ByteBuffer decodeKeyValues(DataInputStream source, // Having this as static is fine but if META is having DBE then we should // change this. public static int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) { - return Bytes.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, - left.getRowLength() - rowCommonPrefix, right.getRowArray(), - right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix); + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), + bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, + bbRight.getRowByteBuffer(), bbRight.getRowPosition() + rowCommonPrefix, + right.getRowLength() - rowCommonPrefix); + } else { + return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), + bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, + right.getRowArray(), right.getRowOffset() + rowCommonPrefix, + right.getRowLength() - rowCommonPrefix); + } + } else { + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, + left.getRowLength() - rowCommonPrefix, bbRight.getRowByteBuffer(), + bbRight.getRowPosition() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix); + } else { + return Bytes.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, + left.getRowLength() - rowCommonPrefix, right.getRowArray(), + right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix); + } + } } public static int compareCommonFamilyPrefix(Cell left, Cell right, int familyCommonPrefix) { - return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset() + familyCommonPrefix, - left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), - right.getFamilyOffset() + familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix); + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(bbLeft.getFamilyByteBuffer(), + bbLeft.getFamilyPosition() + familyCommonPrefix, + left.getFamilyLength() - familyCommonPrefix, bbRight.getFamilyByteBuffer(), + bbRight.getFamilyPosition() + familyCommonPrefix, + right.getFamilyLength() - familyCommonPrefix); + } else { + return ByteBufferUtils.compareTo(bbLeft.getFamilyByteBuffer(), + bbLeft.getFamilyPosition() + familyCommonPrefix, + left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), + right.getFamilyOffset() + familyCommonPrefix, + right.getFamilyLength() - familyCommonPrefix); + } + } else { + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(left.getFamilyArray(), + left.getFamilyOffset() + familyCommonPrefix, left.getFamilyLength() - familyCommonPrefix, + bbRight.getFamilyByteBuffer(), 
bbRight.getFamilyPosition() + familyCommonPrefix, + right.getFamilyLength() - familyCommonPrefix); + } else { + return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset() + familyCommonPrefix, + left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), + right.getFamilyOffset() + familyCommonPrefix, + right.getFamilyLength() - familyCommonPrefix); + } + } } public static int compareCommonQualifierPrefix(Cell left, Cell right, int qualCommonPrefix) { - return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset() + qualCommonPrefix, - left.getQualifierLength() - qualCommonPrefix, right.getQualifierArray(), - right.getQualifierOffset() + qualCommonPrefix, right.getQualifierLength() - qualCommonPrefix); + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(bbLeft.getQualifierByteBuffer(), + bbLeft.getQualifierPosition() + qualCommonPrefix, + left.getQualifierLength() - qualCommonPrefix, bbRight.getQualifierByteBuffer(), + bbRight.getQualifierPosition() + qualCommonPrefix, + right.getQualifierLength() - qualCommonPrefix); + } else { + return ByteBufferUtils.compareTo(bbLeft.getQualifierByteBuffer(), + bbLeft.getQualifierPosition() + qualCommonPrefix, + left.getQualifierLength() - qualCommonPrefix, right.getQualifierArray(), + right.getQualifierOffset() + qualCommonPrefix, + right.getQualifierLength() - qualCommonPrefix); + } + } else { + if (right instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right; + return ByteBufferUtils.compareTo(left.getQualifierArray(), + left.getQualifierOffset() + qualCommonPrefix, + left.getQualifierLength() - qualCommonPrefix, bbRight.getQualifierByteBuffer(), + bbRight.getQualifierPosition() + qualCommonPrefix, + right.getQualifierLength() - qualCommonPrefix); + } else { + return Bytes.compareTo(left.getQualifierArray(), + left.getQualifierOffset() + qualCommonPrefix, + left.getQualifierLength() - qualCommonPrefix, right.getQualifierArray(), + right.getQualifierOffset() + qualCommonPrefix, + right.getQualifierLength() - qualCommonPrefix); + } + } } protected static class SeekerState { @@ -954,25 +1033,57 @@ private int compareTypeBytes(Cell key, Cell right) { return 0; } - private static int findCommonPrefixInRowPart(Cell left, Cell right, int rowCommonPrefix) { - return Bytes.findCommonPrefix(left.getRowArray(), right.getRowArray(), - left.getRowLength() - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, - left.getRowOffset() + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix); + // These findCommonPrefix* methods rely on the fact that keyOnlyKv is the "right" cell argument + // and always on-heap + + private static int findCommonPrefixInRowPart(Cell left, KeyValue.KeyOnlyKeyValue right, + int rowCommonPrefix) { + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + return ByteBufferUtils.findCommonPrefix(bbLeft.getRowByteBuffer(), + bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, + right.getRowArray(), right.getRowOffset() + rowCommonPrefix, + right.getRowLength() - rowCommonPrefix); + } else { + return Bytes.findCommonPrefix(left.getRowArray(), right.getRowArray(), + left.getRowLength() - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, + left.getRowOffset() 
+ rowCommonPrefix, right.getRowOffset() + rowCommonPrefix); + } } - private static int findCommonPrefixInFamilyPart(Cell left, Cell right, int familyCommonPrefix) { - return Bytes.findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), - left.getFamilyLength() - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix, - left.getFamilyOffset() + familyCommonPrefix, right.getFamilyOffset() + familyCommonPrefix); + private static int findCommonPrefixInFamilyPart(Cell left, KeyValue.KeyOnlyKeyValue right, + int familyCommonPrefix) { + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + return ByteBufferUtils.findCommonPrefix(bbLeft.getFamilyByteBuffer(), + bbLeft.getFamilyPosition() + familyCommonPrefix, + left.getFamilyLength() - familyCommonPrefix, right.getFamilyArray(), + right.getFamilyOffset() + familyCommonPrefix, + right.getFamilyLength() - familyCommonPrefix); + } else { + return Bytes.findCommonPrefix(left.getFamilyArray(), right.getFamilyArray(), + left.getFamilyLength() - familyCommonPrefix, right.getFamilyLength() - familyCommonPrefix, + left.getFamilyOffset() + familyCommonPrefix, + right.getFamilyOffset() + familyCommonPrefix); + } } - private static int findCommonPrefixInQualifierPart(Cell left, Cell right, + private static int findCommonPrefixInQualifierPart(Cell left, KeyValue.KeyOnlyKeyValue right, int qualifierCommonPrefix) { - return Bytes.findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(), - left.getQualifierLength() - qualifierCommonPrefix, - right.getQualifierLength() - qualifierCommonPrefix, - left.getQualifierOffset() + qualifierCommonPrefix, - right.getQualifierOffset() + qualifierCommonPrefix); + if (left instanceof ByteBufferExtendedCell) { + ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left; + return ByteBufferUtils.findCommonPrefix(bbLeft.getQualifierByteBuffer(), + bbLeft.getQualifierPosition() + qualifierCommonPrefix, + left.getQualifierLength() - qualifierCommonPrefix, right.getQualifierArray(), + right.getQualifierOffset() + qualifierCommonPrefix, + right.getQualifierLength() - qualifierCommonPrefix); + } else { + return Bytes.findCommonPrefix(left.getQualifierArray(), right.getQualifierArray(), + left.getQualifierLength() - qualifierCommonPrefix, + right.getQualifierLength() - qualifierCommonPrefix, + left.getQualifierOffset() + qualifierCommonPrefix, + right.getQualifierOffset() + qualifierCommonPrefix); + } } private void moveToPrevious() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index be1868b70d7f..a5a5c5105db0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -778,6 +778,30 @@ public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLeng return result; } + /** + * Find length of common prefix in two arrays. + * @param left ByteBuffer to be compared. + * @param leftOffset Offset in left ByteBuffer. + * @param leftLength Length of left ByteBuffer. + * @param right Array to be compared + * @param rightOffset Offset in right Array. + * @param rightLength Length of right Array. 
+ */ + public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + int length = Math.min(leftLength, rightLength); + int result = 0; + + while ( + result < length + && ByteBufferUtils.toByte(left, leftOffset + result) == right[rightOffset + result] + ) { + result++; + } + + return result; + } + /** * Check whether two parts in the same buffer are equal. * @param buffer In which buffer there are parts diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index eeeb078988fb..5b3e5db6c2fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -33,6 +33,7 @@ import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; +import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; @@ -230,6 +231,59 @@ public void testSeekingOnSample() throws IOException { LOG.info("Done"); } + @Test + public void testSeekingToOffHeapKeyValueInSample() throws IOException { + List sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags); + + // create all seekers + List encodedSeekers = new ArrayList<>(); + for (DataBlockEncoding encoding : DataBlockEncoding.values()) { + LOG.info("Encoding: " + encoding); + DataBlockEncoder encoder = encoding.getEncoder(); + if (encoder == null) { + continue; + } + LOG.info("Encoder: " + encoder); + ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); + DataBlockEncoder.EncodedSeeker seeker = + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); + encodedSeekers.add(seeker); + } + LOG.info("Testing it!"); + // test it! 
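As a quick, self-contained illustration of the new ByteBuffer-versus-array findCommonPrefix overload added above (the row keys are arbitrary sample values):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.util.ByteBufferUtils;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FindCommonPrefixExample {
      public static void main(String[] args) {
        ByteBuffer left = ByteBuffer.wrap(Bytes.toBytes("row-0001"));
        byte[] right = Bytes.toBytes("row-0002");
        // "row-000" (7 bytes) is shared and the final byte differs, so this prints 7.
        System.out.println(ByteBufferUtils.findCommonPrefix(left, 0, 8, right, 0, 8));
      }
    }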
+ // try a few random seeks + Random rand = ThreadLocalRandom.current(); + for (boolean seekBefore : new boolean[] { false, true }) { + for (int i = 0; i < NUM_RANDOM_SEEKS; ++i) { + int keyValueId; + if (!seekBefore) { + keyValueId = rand.nextInt(sampleKv.size()); + } else { + keyValueId = rand.nextInt(sampleKv.size() - 1) + 1; + } + + KeyValue keyValue = sampleKv.get(keyValueId); + checkSeekingConsistency(encodedSeekers, seekBefore, buildOffHeapKeyValue(keyValue)); + } + } + + // check edge cases + LOG.info("Checking edge cases"); + checkSeekingConsistency(encodedSeekers, false, sampleKv.get(0)); + for (boolean seekBefore : new boolean[] { false, true }) { + checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); + KeyValue midKv = sampleKv.get(sampleKv.size() / 2); + Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); + checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); + } + LOG.info("Done"); + } + static ByteBuffer encodeKeyValues(DataBlockEncoding encoding, List kvs, HFileBlockEncodingContext encodingContext, boolean useOffheapData) throws IOException { DataBlockEncoder encoder = encoding.getEncoder(); @@ -438,4 +492,15 @@ private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf, assertEquals("Encoding -> decoding gives different results for " + encoder, Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset)); } + + private static ByteBufferKeyValue buildOffHeapKeyValue(KeyValue keyValue) throws IOException { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + keyValue.write(out, false); + byte[] bytes = out.toByteArray(); + ByteBuffer bb = ByteBuffer.allocateDirect(bytes.length); + bb.put(bytes); + bb.flip(); + + return new ByteBufferKeyValue(bb, 0, bytes.length); + } } From b1fd92d3c7ea56b2f49eb9d419e1311a34c2f612 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 17 Aug 2023 15:35:15 +0800 Subject: [PATCH 053/514] HBASE-28027 Make TestClusterScopeQuotaThrottle run faster (#5355) Signed-off-by: GeorryHuang --- .../hbase/quotas/TestClusterScopeQuotaThrottle.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java index 8537d9001a7a..b34f722e2e78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java @@ -28,7 +28,6 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -75,7 +74,6 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); - TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); TEST_UTIL.startMiniCluster(2); TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); @@ -83,12 +81,16 @@ public static void 
setUpBeforeClass() throws Exception { tables = new Table[TABLE_NAMES.length]; for (int i = 0; i < TABLE_NAMES.length; ++i) { - tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY); + TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAMES[i]); + tables[i] = TEST_UTIL.getConnection().getTableBuilder(TABLE_NAMES[i], null) + .setOperationTimeout(10000).build(); } TEST_UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(NAMESPACE).build()); - table = TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLITS); + TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLITS); TEST_UTIL.waitTableAvailable(TABLE_NAME); + table = TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(10000) + .build(); } @AfterClass @@ -180,7 +182,6 @@ public void testUserClusterScopeQuota() throws Exception { triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAMES); } - @org.junit.Ignore @Test // Spews the log w/ triggering of scheduler? HBASE-24035 public void testUserNamespaceClusterScopeQuota() throws Exception { final Admin admin = TEST_UTIL.getAdmin(); From 8ccb910fa93c10dd84aac5e38ac0c0e44db935bb Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 18 Aug 2023 09:59:38 -0400 Subject: [PATCH 054/514] HBASE-27947 RegionServer OOM when outbound channel backed up (#5350) Signed-off-by: Duo Zhang Reviewed-by: Norman Maurer --- .../hadoop/hbase/util/NettyUnsafeUtils.java | 61 ++++++ .../hbase/ipc/MetricsHBaseServerSource.java | 16 ++ .../ipc/MetricsHBaseServerSourceImpl.java | 24 +++ .../hbase/ipc/MetricsHBaseServerWrapper.java | 7 + .../hadoop/hbase/ipc/MetricsHBaseServer.java | 8 + .../ipc/MetricsHBaseServerWrapperImpl.java | 13 ++ .../hadoop/hbase/ipc/NettyRpcServer.java | 161 +++++++++++++++- ...ttyRpcServerChannelWritabilityHandler.java | 125 ++++++++++++ .../ipc/NettyRpcServerPreambleHandler.java | 11 +- .../hadoop/hbase/ipc/NettyServerCall.java | 2 +- .../hbase/ipc/FailingNettyRpcServer.java | 9 +- .../ipc/MetricsHBaseServerWrapperStub.java | 7 + .../ipc/TestNettyChannelWritability.java | 182 ++++++++++++++++++ .../hadoop/hbase/ipc/TestRpcMetrics.java | 9 + .../ipc/TestRpcSkipInitialSaslHandshake.java | 28 +-- 15 files changed, 619 insertions(+), 44 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/NettyUnsafeUtils.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerChannelWritabilityHandler.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NettyUnsafeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NettyUnsafeUtils.java new file mode 100644 index 000000000000..8b246e978ea0 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NettyUnsafeUtils.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.channel.Channel; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelOption; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundBuffer; + +/** + * Wraps some usages of netty's unsafe API, for ease of maintainability. + */ +@InterfaceAudience.Private +public final class NettyUnsafeUtils { + + private NettyUnsafeUtils() { + } + + /** + * Directly closes the channel, setting SO_LINGER to 0 and skipping any handlers in the pipeline. + * This is useful for cases where it's important to immediately close without any delay. + * Otherwise, pipeline handlers and even general TCP flows can cause a normal close to take + * upwards of a few second or more. This will likely cause the client side to see either a + * "Connection reset by peer" or unexpected ConnectionClosedException. + *
<p>
+ * It's necessary to call this from within the channel's eventLoop! + */ + public static void closeImmediately(Channel channel) { + assert channel.eventLoop().inEventLoop(); + channel.config().setOption(ChannelOption.SO_LINGER, 0); + channel.unsafe().close(channel.voidPromise()); + } + + /** + * Get total bytes pending write to socket + */ + public static long getTotalPendingOutboundBytes(Channel channel) { + ChannelOutboundBuffer outboundBuffer = channel.unsafe().outboundBuffer(); + // can be null when the channel is closing + if (outboundBuffer == null) { + return 0; + } + return outboundBuffer.totalPendingWriteBytes(); + } +} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 98ecf8b8d92d..df2e335a718f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -46,6 +46,14 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String PROCESS_CALL_TIME_DESC = "Processing call time."; String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; + + String UNWRITABLE_TIME_NAME = "unwritableTime"; + String UNWRITABLE_TIME_DESC = + "Time where an channel was unwritable due to having too many outbound bytes"; + String MAX_OUTBOUND_BYTES_EXCEEDED_NAME = "maxOutboundBytesExceeded"; + String MAX_OUTBOUND_BYTES_EXCEEDED_DESC = + "Number of times a connection was closed because the channel outbound " + + "bytes exceeded the configured max."; String QUEUE_SIZE_NAME = "queueSize"; String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + "parsed and is waiting to run or is currently being executed."; @@ -97,6 +105,10 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String NETTY_DM_USAGE_NAME = "nettyDirectMemoryUsage"; String NETTY_DM_USAGE_DESC = "Current Netty direct memory usage."; + String NETTY_TOTAL_PENDING_OUTBOUND_NAME = "nettyTotalPendingOutboundBytes"; + String NETTY_TOTAL_PENDING_OUTBOUND_DESC = "Current total bytes pending write to all channel"; + String NETTY_MAX_PENDING_OUTBOUND_NAME = "nettyMaxPendingOutboundBytes"; + String NETTY_MAX_PENDING_OUTBOUND_DESC = "Current maximum bytes pending write to any channel"; void authorizationSuccess(); @@ -121,4 +133,8 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { void processedCall(int processingTime); void queuedAndProcessedCall(int totalTime); + + void unwritableTime(long unwritableTime); + + void maxOutboundBytesExceeded(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index 9c75f4e6bcba..1a6d557d8adc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import 
org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -36,10 +37,12 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl private final MutableFastCounter authenticationFallbacks; private final MutableFastCounter sentBytes; private final MutableFastCounter receivedBytes; + private final MutableFastCounter maxOutboundBytesExceeded; private MetricHistogram queueCallTime; private MetricHistogram processCallTime; private MetricHistogram totalCallTime; + private MetricHistogram unwritableTime; private MetricHistogram requestSize; private MetricHistogram responseSize; @@ -67,6 +70,10 @@ public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescriptio this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, TOTAL_CALL_TIME_DESC); + this.unwritableTime = + this.getMetricsRegistry().newTimeHistogram(UNWRITABLE_TIME_NAME, UNWRITABLE_TIME_DESC); + this.maxOutboundBytesExceeded = this.getMetricsRegistry() + .newCounter(MAX_OUTBOUND_BYTES_EXCEEDED_NAME, MAX_OUTBOUND_BYTES_EXCEEDED_DESC, 0); this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, REQUEST_SIZE_DESC); this.responseSize = @@ -133,6 +140,16 @@ public void queuedAndProcessedCall(int totalTime) { totalCallTime.add(totalTime); } + @Override + public void unwritableTime(long unwritableTime) { + this.unwritableTime.add(unwritableTime); + } + + @Override + public void maxOutboundBytesExceeded() { + maxOutboundBytesExceeded.incr(); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); @@ -177,6 +194,13 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { wrapper.getActiveScanRpcHandlerCount()) .addGauge(Interns.info(NETTY_DM_USAGE_NAME, NETTY_DM_USAGE_DESC), wrapper.getNettyDmUsage()); + + Pair totalAndMax = wrapper.getTotalAndMaxNettyOutboundBytes(); + mrb.addGauge( + Interns.info(NETTY_TOTAL_PENDING_OUTBOUND_NAME, NETTY_TOTAL_PENDING_OUTBOUND_DESC), + totalAndMax.getFirst()); + mrb.addGauge(Interns.info(NETTY_MAX_PENDING_OUTBOUND_NAME, NETTY_MAX_PENDING_OUTBOUND_DESC), + totalAndMax.getSecond()); } metricsRegistry.snapshot(mrb, all); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java index 1a8980bbc7bd..bb376cba930d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -64,4 +65,10 @@ public interface MetricsHBaseServerWrapper { int getActiveScanRpcHandlerCount(); long getNettyDmUsage(); + + /** + * These two metrics are calculated together, so we want to return them in one call + * @return pair containing total (first) and max (second) pending outbound bytes. 
+ */ + Pair getTotalAndMaxNettyOutboundBytes(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index a4c73f925d3c..b5fbb5c43d15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -97,6 +97,14 @@ void totalCall(int totalTime) { source.queuedAndProcessedCall(totalTime); } + void unwritableTime(long unwritableTime) { + source.unwritableTime(unwritableTime); + } + + void maxOutboundBytesExceeded() { + source.maxOutboundBytesExceeded(); + } + public void exception(Throwable throwable) { source.exception(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 857315568c5e..1fc1806265d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.util.DirectMemoryUtils; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -209,4 +210,16 @@ public long getNettyDmUsage() { return DirectMemoryUtils.getNettyDirectMemoryUsage(); } + + @Override + public Pair getTotalAndMaxNettyOutboundBytes() { + if ( + !isServerStarted() || this.server.getScheduler() == null + || !(this.server instanceof NettyRpcServer) + ) { + return Pair.newPair(0L, 0L); + } + + return ((NettyRpcServer) server).getTotalAndMaxNettyOutboundBytes(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index 0b7badf7d815..722ee1d28c91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.io.crypto.tls.X509Util; import org.apache.hadoop.hbase.security.HBasePolicyProvider; import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; +import org.apache.hadoop.hbase.util.NettyUnsafeUtils; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.yetus.audience.InterfaceAudience; @@ -53,6 +55,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.ServerChannel; +import org.apache.hbase.thirdparty.io.netty.channel.WriteBufferWaterMark; import org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup; import org.apache.hbase.thirdparty.io.netty.channel.group.DefaultChannelGroup; import org.apache.hbase.thirdparty.io.netty.handler.codec.FixedLengthFrameDecoder; @@ -84,6 +87,38 @@ public class NettyRpcServer extends RpcServer { static final String UNPOOLED_ALLOCATOR_TYPE = "unpooled"; static final String HEAP_ALLOCATOR_TYPE = "heap"; + /** + * Low watermark for pending outbound bytes of a single netty channel. If the high watermark was + * exceeded, channel will have setAutoRead to true again. 
The server will start reading incoming + * bytes (requests) from the client channel. + */ + public static final String CHANNEL_WRITABLE_LOW_WATERMARK_KEY = + "hbase.server.netty.writable.watermark.low"; + private static final int CHANNEL_WRITABLE_LOW_WATERMARK_DEFAULT = 0; + + /** + * High watermark for pending outbound bytes of a single netty channel. If the number of pending + * outbound bytes exceeds this threshold, setAutoRead will be false for the channel. The server + * will stop reading incoming requests from the client channel. + *
<p>
+ * Note: any requests already in the call queue will still be processed. + */ + public static final String CHANNEL_WRITABLE_HIGH_WATERMARK_KEY = + "hbase.server.netty.writable.watermark.high"; + private static final int CHANNEL_WRITABLE_HIGH_WATERMARK_DEFAULT = 0; + + /** + * Fatal watermark for pending outbound bytes of a single netty channel. If the number of pending + * outbound bytes exceeds this threshold, the connection will be forcibly closed so that memory + * can be reclaimed. The client will have to re-establish a new connection and retry any in-flight + * requests. + *
<p>
+ * Note: must be higher than the high watermark, otherwise it's ignored. + */ + public static final String CHANNEL_WRITABLE_FATAL_WATERMARK_KEY = + "hbase.server.netty.writable.watermark.fatal"; + private static final int CHANNEL_WRITABLE_FATAL_WATERMARK_DEFAULT = 0; + private final InetSocketAddress bindAddress; private final CountDownLatch closed = new CountDownLatch(1); @@ -94,6 +129,9 @@ public class NettyRpcServer extends RpcServer { private final AtomicReference keyStoreWatcher = new AtomicReference<>(); private final AtomicReference trustStoreWatcher = new AtomicReference<>(); + private volatile int writeBufferFatalThreshold; + private volatile WriteBufferWaterMark writeBufferWaterMark; + public NettyRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { @@ -108,6 +146,10 @@ public NettyRpcServer(Server server, String name, List channelClass = config.serverChannelClass(); ServerBootstrap bootstrap = new ServerBootstrap().group(eventLoopGroup).channel(channelClass) @@ -117,6 +159,7 @@ public NettyRpcServer(Server server, String name, List() { @Override protected void initChannel(Channel ch) throws Exception { + ch.config().setWriteBufferWaterMark(writeBufferWaterMark); ch.config().setAllocator(channelAllocator); ChannelPipeline pipeline = ch.pipeline(); FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); @@ -124,12 +167,18 @@ protected void initChannel(Channel ch) throws Exception { if (conf.getBoolean(HBASE_SERVER_NETTY_TLS_ENABLED, false)) { initSSL(pipeline, conf.getBoolean(HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, true)); } + NettyServerRpcConnection conn = createNettyServerRpcConnection(ch); pipeline.addLast(NettyRpcServerPreambleHandler.DECODER_NAME, preambleDecoder) - .addLast(createNettyRpcServerPreambleHandler()) + .addLast(new NettyRpcServerPreambleHandler(NettyRpcServer.this, conn)) // We need NettyRpcServerResponseEncoder here because NettyRpcServerPreambleHandler may // send RpcResponse to client. - .addLast(NettyRpcServerResponseEncoder.NAME, - new NettyRpcServerResponseEncoder(metrics)); + .addLast(NettyRpcServerResponseEncoder.NAME, new NettyRpcServerResponseEncoder(metrics)) + // Add writability handler after the response encoder, so we can abort writes before + // they get encoded, if the fatal threshold is exceeded. We pass in suppliers here so + // that the handler configs can be live updated via update_config. 
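A sketch of how an operator might enable these thresholds; the byte values are illustrative choices rather than defaults shipped by this change, and as noted above the settings can also be adjusted at runtime via update_config:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class NettyWritabilityConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Resume reading from a client once its pending responses drop below 256 KB.
        conf.setInt("hbase.server.netty.writable.watermark.low", 256 * 1024);
        // Stop reading new requests from a client once 1 MB of responses are queued for it.
        conf.setInt("hbase.server.netty.writable.watermark.high", 1024 * 1024);
        // Forcibly close the connection if 10 MB of responses pile up (must exceed the high mark).
        conf.setInt("hbase.server.netty.writable.watermark.fatal", 10 * 1024 * 1024);
      }
    }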
+ .addLast(NettyRpcServerChannelWritabilityHandler.NAME, + new NettyRpcServerChannelWritabilityHandler(metrics, () -> writeBufferFatalThreshold, + () -> isWritabilityBackpressureEnabled())); } }); try { @@ -142,6 +191,91 @@ protected void initChannel(Channel ch) throws Exception { this.scheduler.init(new RpcSchedulerContext(this)); } + @Override + public void onConfigurationChange(Configuration newConf) { + super.onConfigurationChange(newConf); + configureNettyWatermarks(newConf); + } + + private void configureNettyWatermarks(Configuration conf) { + int watermarkLow = + conf.getInt(CHANNEL_WRITABLE_LOW_WATERMARK_KEY, CHANNEL_WRITABLE_LOW_WATERMARK_DEFAULT); + int watermarkHigh = + conf.getInt(CHANNEL_WRITABLE_HIGH_WATERMARK_KEY, CHANNEL_WRITABLE_HIGH_WATERMARK_DEFAULT); + int fatalThreshold = + conf.getInt(CHANNEL_WRITABLE_FATAL_WATERMARK_KEY, CHANNEL_WRITABLE_FATAL_WATERMARK_DEFAULT); + + WriteBufferWaterMark oldWaterMark = writeBufferWaterMark; + int oldFatalThreshold = writeBufferFatalThreshold; + + boolean disabled = false; + if (watermarkHigh == 0 && watermarkLow == 0) { + // if both are 0, use the netty default, which we will treat as "disabled". + // when disabled, we won't manage autoRead in response to writability changes. + writeBufferWaterMark = WriteBufferWaterMark.DEFAULT; + disabled = true; + } else { + // netty checks pendingOutboundBytes < watermarkLow. It can never be less than 0, so set to + // 1 to avoid confusing behavior. + if (watermarkLow == 0) { + LOG.warn( + "Detected a {} value of 0, which is impossible to achieve " + + "due to how netty evaluates these thresholds, setting to 1", + CHANNEL_WRITABLE_LOW_WATERMARK_KEY); + watermarkLow = 1; + } + + // netty validates the watermarks and throws an exception if high < low, fail more gracefully + // by disabling the watermarks and warning. + if (watermarkHigh <= watermarkLow) { + LOG.warn( + "Detected {} value {}, lower than {} value {}. This will fail netty validation, " + + "so disabling", + CHANNEL_WRITABLE_HIGH_WATERMARK_KEY, watermarkHigh, CHANNEL_WRITABLE_LOW_WATERMARK_KEY, + watermarkLow); + writeBufferWaterMark = WriteBufferWaterMark.DEFAULT; + } else { + writeBufferWaterMark = new WriteBufferWaterMark(watermarkLow, watermarkHigh); + } + + // only apply this check when watermark is enabled. this way we give the operator some + // flexibility if they want to try enabling fatal threshold without backpressure. + if (fatalThreshold > 0 && fatalThreshold <= watermarkHigh) { + LOG.warn("Detected a {} value of {}, which is lower than the {} value of {}, ignoring.", + CHANNEL_WRITABLE_FATAL_WATERMARK_KEY, fatalThreshold, CHANNEL_WRITABLE_HIGH_WATERMARK_KEY, + watermarkHigh); + fatalThreshold = 0; + } + } + + writeBufferFatalThreshold = fatalThreshold; + + if ( + oldWaterMark != null && (oldWaterMark.low() != writeBufferWaterMark.low() + || oldWaterMark.high() != writeBufferWaterMark.high() + || oldFatalThreshold != writeBufferFatalThreshold) + ) { + LOG.info("Updated netty outbound write buffer watermarks: low={}, high={}, fatal={}", + disabled ? "disabled" : writeBufferWaterMark.low(), + disabled ? "disabled" : writeBufferWaterMark.high(), + writeBufferFatalThreshold <= 0 ? 
"disabled" : writeBufferFatalThreshold); + } + + // update any existing channels + for (Channel channel : allChannels) { + channel.config().setWriteBufferWaterMark(writeBufferWaterMark); + // if disabling watermark, set auto read to true in case channel had been exceeding + // previous watermark + if (disabled) { + channel.config().setAutoRead(true); + } + } + } + + public boolean isWritabilityBackpressureEnabled() { + return writeBufferWaterMark != WriteBufferWaterMark.DEFAULT; + } + private ByteBufAllocator getChannelAllocator(Configuration conf) throws IOException { final String value = conf.get(HBASE_NETTY_ALLOCATOR_KEY); if (value != null) { @@ -172,10 +306,10 @@ private ByteBufAllocator getChannelAllocator(Configuration conf) throws IOExcept } } - // will be overriden in tests + // will be overridden in tests @InterfaceAudience.Private - protected NettyRpcServerPreambleHandler createNettyRpcServerPreambleHandler() { - return new NettyRpcServerPreambleHandler(NettyRpcServer.this); + protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { + return new NettyServerRpcConnection(NettyRpcServer.this, channel); } @Override @@ -296,4 +430,19 @@ SslContext getSslContext() throws X509Exception, IOException { } return result; } + + public int getWriteBufferFatalThreshold() { + return writeBufferFatalThreshold; + } + + public Pair getTotalAndMaxNettyOutboundBytes() { + long total = 0; + long max = 0; + for (Channel channel : allChannels) { + long outbound = NettyUnsafeUtils.getTotalPendingOutboundBytes(channel); + total += outbound; + max = Math.max(max, outbound); + } + return Pair.newPair(total, max); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerChannelWritabilityHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerChannelWritabilityHandler.java new file mode 100644 index 000000000000..4b0b3878da81 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerChannelWritabilityHandler.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import java.util.function.BooleanSupplier; +import java.util.function.IntSupplier; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.NettyUnsafeUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.channel.Channel; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; +import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; + +/** + * Handler to enforce writability protections on our server channels:
+ * - Responds to channel writability events, which are triggered when the total pending bytes for a + * channel passes configured high and low watermarks. When high watermark is exceeded, the channel + * is setAutoRead(false). This way, we won't accept new requests from the client until some pending + * outbound bytes are successfully received by the client.
+ * - Pre-processes any channel write requests. If the total pending outbound bytes exceeds a fatal + * threshold, the channel is forcefully closed and the write is set to failed. This handler should + * be the last handler in the pipeline so that it's the first handler to receive any messages sent + * to channel.write() or channel.writeAndFlush(). + */ +@InterfaceAudience.Private +public class NettyRpcServerChannelWritabilityHandler extends ChannelDuplexHandler { + + static final String NAME = "NettyRpcServerChannelWritabilityHandler"; + + private final MetricsHBaseServer metrics; + private final IntSupplier pendingBytesFatalThreshold; + private final BooleanSupplier isWritabilityBackpressureEnabled; + + private boolean writable = true; + private long unwritableStartTime; + + NettyRpcServerChannelWritabilityHandler(MetricsHBaseServer metrics, + IntSupplier pendingBytesFatalThreshold, BooleanSupplier isWritabilityBackpressureEnabled) { + this.metrics = metrics; + this.pendingBytesFatalThreshold = pendingBytesFatalThreshold; + this.isWritabilityBackpressureEnabled = isWritabilityBackpressureEnabled; + } + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + throws Exception { + if (handleFatalThreshold(ctx)) { + promise.setFailure( + new ConnectionClosedException("Channel outbound bytes exceeded fatal threshold")); + if (msg instanceof RpcResponse) { + ((RpcResponse) msg).done(); + } else { + ReferenceCountUtil.release(msg); + } + return; + } + ctx.write(msg, promise); + } + + @Override + public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { + if (isWritabilityBackpressureEnabled.getAsBoolean()) { + handleWritabilityChanged(ctx); + } + ctx.fireChannelWritabilityChanged(); + } + + private boolean handleFatalThreshold(ChannelHandlerContext ctx) { + int fatalThreshold = pendingBytesFatalThreshold.getAsInt(); + if (fatalThreshold <= 0) { + return false; + } + + Channel channel = ctx.channel(); + long outboundBytes = NettyUnsafeUtils.getTotalPendingOutboundBytes(channel); + if (outboundBytes < fatalThreshold) { + return false; + } + + if (channel.isOpen()) { + metrics.maxOutboundBytesExceeded(); + RpcServer.LOG.warn( + "{}: Closing connection because outbound buffer size of {} exceeds fatal threshold of {}", + channel.remoteAddress(), outboundBytes, fatalThreshold); + NettyUnsafeUtils.closeImmediately(channel); + } + + return true; + } + + private void handleWritabilityChanged(ChannelHandlerContext ctx) { + boolean oldWritableValue = this.writable; + + this.writable = ctx.channel().isWritable(); + ctx.channel().config().setAutoRead(this.writable); + + if (!oldWritableValue && this.writable) { + // changing from not writable to writable, update metrics + metrics.unwritableTime(EnvironmentEdgeManager.currentTime() - unwritableStartTime); + unwritableStartTime = 0; + } else if (oldWritableValue && !this.writable) { + // changing from writable to non-writable, set start time + unwritableStartTime = EnvironmentEdgeManager.currentTime(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index 8269bbc60d88..b79a67f986e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -22,7 +22,6 @@ import 
org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; @@ -38,14 +37,15 @@ class NettyRpcServerPreambleHandler extends SimpleChannelInboundHandler static final String DECODER_NAME = "preambleDecoder"; private final NettyRpcServer rpcServer; + private final NettyServerRpcConnection conn; - public NettyRpcServerPreambleHandler(NettyRpcServer rpcServer) { + public NettyRpcServerPreambleHandler(NettyRpcServer rpcServer, NettyServerRpcConnection conn) { this.rpcServer = rpcServer; + this.conn = conn; } @Override protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { - NettyServerRpcConnection conn = createNettyServerRpcConnection(ctx.channel()); ByteBuffer buf = ByteBuffer.allocate(msg.readableBytes()); msg.readBytes(buf); buf.flip(); @@ -76,9 +76,4 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E ctx.channel().remoteAddress(), cause); NettyFutureUtils.safeClose(ctx); } - - // will be overridden in tests - protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { - return new NettyServerRpcConnection(rpcServer, channel); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java index fd0c6d75d888..4f0540da80a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java @@ -54,6 +54,6 @@ class NettyServerCall extends ServerCall { public synchronized void sendResponseIfReady() throws IOException { // set param null to reduce memory pressure this.param = null; - connection.channel.writeAndFlush(this); + connection.doRespond(this); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/FailingNettyRpcServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/FailingNettyRpcServer.java index d5c408c23874..da4f70e3a247 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/FailingNettyRpcServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/FailingNettyRpcServer.java @@ -49,12 +49,7 @@ public void processRequest(ByteBuff buf) throws IOException, InterruptedExceptio } @Override - protected NettyRpcServerPreambleHandler createNettyRpcServerPreambleHandler() { - return new NettyRpcServerPreambleHandler(FailingNettyRpcServer.this) { - @Override - protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { - return new FailingConnection(FailingNettyRpcServer.this, channel); - } - }; + protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { + return new FailingConnection(FailingNettyRpcServer.this, channel); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java index 6e5dfe87fc7b..7170413bee90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java @@ -17,6 +17,8 @@ */ 
package org.apache.hadoop.hbase.ipc; +import org.apache.hadoop.hbase.util.Pair; + public class MetricsHBaseServerWrapperStub implements MetricsHBaseServerWrapper { @Override public long getTotalQueueSize() { @@ -127,4 +129,9 @@ public int getMetaPriorityQueueLength() { public int getActiveMetaPriorityRpcHandlerCount() { return 1; } + + @Override + public Pair getTotalAndMaxNettyOutboundBytes() { + return Pair.newPair(100L, 5L); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java new file mode 100644 index 000000000000..001f6dbd22c7 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.SERVICE; +import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.newBlockingStub; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; + +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos; + +@Category({ RPCTests.class, MediumTests.class }) +public class TestNettyChannelWritability { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + 
HBaseClassTestRule.forClass(TestNettyChannelWritability.class); + + private static final MetricsAssertHelper METRICS_ASSERT = + CompatibilityFactory.getInstance(MetricsAssertHelper.class); + + private static final byte[] CELL_BYTES = Bytes.toBytes("xyz"); + private static final KeyValue CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, CELL_BYTES); + + /** + * Test that we properly send configured watermarks to netty, and trigger setWritable when + * necessary. + */ + @Test + public void testNettyWritableWatermarks() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.setInt(NettyRpcServer.CHANNEL_WRITABLE_LOW_WATERMARK_KEY, 1); + conf.setInt(NettyRpcServer.CHANNEL_WRITABLE_HIGH_WATERMARK_KEY, 2); + + NettyRpcServer rpcServer = createRpcServer(conf, 0); + try { + sendAndReceive(conf, rpcServer, 5); + METRICS_ASSERT.assertCounterGt("unwritableTime_numOps", 0, + rpcServer.metrics.getMetricsSource()); + } finally { + rpcServer.stop(); + } + } + + /** + * Test that our fatal watermark is honored, which requires artificially causing some queueing so + * that pendingOutboundBytes increases. + */ + @Test + public void testNettyWritableFatalThreshold() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.setInt(NettyRpcServer.CHANNEL_WRITABLE_FATAL_WATERMARK_KEY, 1); + + // flushAfter is 3 here, with requestCount 5 below. If we never flush, the WriteTasks will sit + // in the eventloop. So we flush a few at once, which will ensure that we hit fatal threshold + NettyRpcServer rpcServer = createRpcServer(conf, 3); + try { + CompletionException exception = + assertThrows(CompletionException.class, () -> sendAndReceive(conf, rpcServer, 5)); + assertTrue(exception.getCause().getCause() instanceof ServiceException); + METRICS_ASSERT.assertCounterGt("maxOutboundBytesExceeded", 0, + rpcServer.metrics.getMetricsSource()); + } finally { + rpcServer.stop(); + } + } + + private void sendAndReceive(Configuration conf, NettyRpcServer rpcServer, int requestCount) + throws Exception { + List cells = new ArrayList<>(); + int count = 3; + for (int i = 0; i < count; i++) { + cells.add(CELL); + } + + try (NettyRpcClient client = new NettyRpcClient(conf)) { + rpcServer.start(); + TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub = + newBlockingStub(client, rpcServer.getListenerAddress()); + CompletableFuture[] futures = new CompletableFuture[requestCount]; + for (int i = 0; i < requestCount; i++) { + futures[i] = CompletableFuture.runAsync(() -> { + try { + sendMessage(cells, stub); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + CompletableFuture.allOf(futures).join(); + } + } + + private void sendMessage(List cells, + TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub) throws Exception { + HBaseRpcController pcrc = new HBaseRpcControllerImpl(CellUtil.createCellScanner(cells)); + String message = "hello"; + assertEquals(message, + stub.echo(pcrc, TestProtos.EchoRequestProto.newBuilder().setMessage(message).build()) + .getMessage()); + int index = 0; + CellScanner cellScanner = pcrc.cellScanner(); + assertNotNull(cellScanner); + while (cellScanner.advance()) { + assertEquals(CELL, cellScanner.current()); + index++; + } + assertEquals(cells.size(), index); + } + + private NettyRpcServer createRpcServer(Configuration conf, int flushAfter) throws IOException { + String name = "testRpcServer"; + ArrayList services = + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)); + + 
InetSocketAddress bindAddress = new InetSocketAddress("localhost", 0); + FifoRpcScheduler scheduler = new FifoRpcScheduler(conf, 1); + + AtomicInteger writeCount = new AtomicInteger(0); + + return new NettyRpcServer(null, name, services, bindAddress, conf, scheduler, true) { + @Override + protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { + return new NettyServerRpcConnection(this, channel) { + @Override + protected void doRespond(RpcResponse resp) { + if (writeCount.incrementAndGet() >= flushAfter) { + super.doRespond(resp); + } else { + channel.write(resp); + } + } + }; + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java index 288bb3fe2624..c55568d392ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java @@ -89,6 +89,9 @@ public void testWrapperSource() { HELPER.assertGauge("numCallsInWriteQueue", 50, serverSource); HELPER.assertGauge("numCallsInReadQueue", 50, serverSource); HELPER.assertGauge("numCallsInScanQueue", 2, serverSource); + HELPER.assertGauge("nettyDirectMemoryUsage", 100, serverSource); + HELPER.assertGauge("nettyTotalPendingOutboundBytes", 100, serverSource); + HELPER.assertGauge("nettyMaxPendingOutboundBytes", 5, serverSource); } /** @@ -100,6 +103,12 @@ public void testSourceMethods() { new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); MetricsHBaseServerSource serverSource = mrpc.getMetricsSource(); + mrpc.unwritableTime(100); + mrpc.maxOutboundBytesExceeded(); + mrpc.maxOutboundBytesExceeded(); + HELPER.assertCounter("maxOutboundBytesExceeded", 2, serverSource); + HELPER.assertCounter("unwritableTime_NumOps", 1, serverSource); + for (int i = 0; i < 12; i++) { mrpc.authenticationFailure(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java index 9f6b7d54430b..bc791754a12e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java @@ -28,7 +28,7 @@ import java.io.File; import java.net.InetSocketAddress; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -49,9 +49,7 @@ import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface; @@ -131,29 +129,15 @@ public void test() throws Exception { .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); - final AtomicBoolean useSaslRef = new AtomicBoolean(false); + final AtomicReference conn = new AtomicReference<>(null); NettyRpcServer rpcServer = new NettyRpcServer(null, 
getClass().getSimpleName(), Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), new InetSocketAddress(HOST, 0), serverConf, new FifoRpcScheduler(serverConf, 1), true) { @Override - protected NettyRpcServerPreambleHandler createNettyRpcServerPreambleHandler() { - return new NettyRpcServerPreambleHandler(this) { - private NettyServerRpcConnection conn; - - @Override - protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { - super.channelRead0(ctx, msg); - useSaslRef.set(conn.useSasl); - - } - - @Override - protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { - conn = super.createNettyServerRpcConnection(channel); - return conn; - } - }; + protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { + conn.set(super.createNettyServerRpcConnection(channel)); + return conn.get(); } }; @@ -167,7 +151,7 @@ protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channe stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage("test").build()) .getMessage(); assertTrue("test".equals(response)); - assertFalse(useSaslRef.get()); + assertFalse(conn.get().useSasl); } finally { rpcServer.stop(); From dae078e5bc342012b49cd066027eb53ae9a21280 Mon Sep 17 00:00:00 2001 From: jbewing Date: Sun, 20 Aug 2023 02:53:22 -0400 Subject: [PATCH 055/514] HBASE-28025 Enhance ByteBufferUtils.findCommonPrefix to compare 8 bytes each time (#5354) Signed-off-by: Duo Zhang --- .../hadoop/hbase/util/ByteBufferUtils.java | 185 +++++++++++++++--- .../org/apache/hadoop/hbase/util/Bytes.java | 107 +++++++++- .../hbase/util/TestByteBufferUtils.java | 31 +++ .../apache/hadoop/hbase/util/TestBytes.java | 43 ++++ 4 files changed, 329 insertions(+), 37 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index a5a5c5105db0..054de74d7d1e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -80,6 +80,14 @@ static abstract class Converter { abstract int putLong(ByteBuffer buffer, int index, long val); } + static abstract class CommonPrefixer { + abstract int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength); + + abstract int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, + int rightOffset, int rightLength); + } + static class ComparerHolder { static final String UNSAFE_COMPARER_NAME = ComparerHolder.class.getName() + "$UnsafeComparer"; @@ -322,6 +330,111 @@ int putLong(ByteBuffer buffer, int index, long val) { } } + static class CommonPrefixerHolder { + static final String UNSAFE_COMMON_PREFIXER_NAME = + CommonPrefixerHolder.class.getName() + "$UnsafeCommonPrefixer"; + + static final CommonPrefixer BEST_COMMON_PREFIXER = getBestCommonPrefixer(); + + static CommonPrefixer getBestCommonPrefixer() { + try { + Class theClass = + Class.forName(UNSAFE_COMMON_PREFIXER_NAME).asSubclass(CommonPrefixer.class); + + return theClass.getConstructor().newInstance(); + } catch (Throwable t) { // ensure we really catch *everything* + return PureJavaCommonPrefixer.INSTANCE; + } + } + + static final class PureJavaCommonPrefixer extends CommonPrefixer { + static final PureJavaCommonPrefixer INSTANCE = new PureJavaCommonPrefixer(); + + private PureJavaCommonPrefixer() 
{ + } + + @Override + public int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + int length = Math.min(leftLength, rightLength); + int result = 0; + + while ( + result < length + && ByteBufferUtils.toByte(left, leftOffset + result) == right[rightOffset + result] + ) { + result++; + } + + return result; + } + + @Override + int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, + int rightOffset, int rightLength) { + int length = Math.min(leftLength, rightLength); + int result = 0; + + while ( + result < length && ByteBufferUtils.toByte(left, leftOffset + result) + == ByteBufferUtils.toByte(right, rightOffset + result) + ) { + result++; + } + + return result; + } + } + + static final class UnsafeCommonPrefixer extends CommonPrefixer { + + static { + if (!UNSAFE_UNALIGNED) { + throw new Error(); + } + } + + public UnsafeCommonPrefixer() { + } + + @Override + public int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + long offset1Adj; + Object refObj1 = null; + if (left.isDirect()) { + offset1Adj = leftOffset + UnsafeAccess.directBufferAddress(left); + } else { + offset1Adj = leftOffset + left.arrayOffset() + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET; + refObj1 = left.array(); + } + return findCommonPrefixUnsafe(refObj1, offset1Adj, leftLength, right, + rightOffset + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET, rightLength); + } + + @Override + public int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, + int rightOffset, int rightLength) { + long offset1Adj, offset2Adj; + Object refObj1 = null, refObj2 = null; + if (left.isDirect()) { + offset1Adj = leftOffset + UnsafeAccess.directBufferAddress(left); + } else { + offset1Adj = leftOffset + left.arrayOffset() + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET; + refObj1 = left.array(); + } + if (right.isDirect()) { + offset2Adj = rightOffset + UnsafeAccess.directBufferAddress(right); + } else { + offset2Adj = rightOffset + right.arrayOffset() + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET; + refObj2 = right.array(); + } + return findCommonPrefixUnsafe(refObj1, offset1Adj, leftLength, refObj2, offset2Adj, + rightLength); + } + } + } + /** * Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)}, but writes to a * {@link ByteBuffer}. 
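As a rough standalone illustration of the stride-8 technique the prefixers above delegate to (findCommonPrefixUnsafe, added in a later hunk): when two 8-byte words differ, XOR-ing them leaves set bits only in the differing bytes, so numberOfTrailingZeros divided by 8 picks out the first mismatching byte for little-endian loads (numberOfLeadingZeros for big-endian). The sketch below is not part of the patch; it uses plain ByteBuffer reads instead of HBasePlatformDependent, and the class and method names are made up for the example.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class StrideCommonPrefixSketch {

  /** Length of the common prefix of a and b, comparing 8 bytes per iteration. */
  static int commonPrefix(byte[] a, byte[] b) {
    int minLength = Math.min(a.length, b.length);
    int strideLimit = minLength & ~7; // largest multiple of 8 <= minLength
    ByteBuffer wa = ByteBuffer.wrap(a).order(ByteOrder.LITTLE_ENDIAN);
    ByteBuffer wb = ByteBuffer.wrap(b).order(ByteOrder.LITTLE_ENDIAN);
    int i = 0;
    for (; i < strideLimit; i += 8) {
      long lw = wa.getLong(i);
      long rw = wb.getLong(i);
      if (lw != rw) {
        // The lowest set bit of the XOR sits in the first differing byte, because the
        // words were read little-endian (the byte at offset i is the least significant).
        return i + Long.numberOfTrailingZeros(lw ^ rw) / 8;
      }
    }
    // Epilogue: the last minLength % 8 bytes, one byte at a time, as in the patch.
    while (i < minLength && a[i] == b[i]) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) {
    byte[] left = "hellohellohi".getBytes(StandardCharsets.UTF_8);
    byte[] right = "hellohelloHI".getBytes(StandardCharsets.UTF_8);
    System.out.println(commonPrefix(left, right)); // prints 10
  }
}
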
@@ -744,14 +857,7 @@ public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out, int sou */ public static int findCommonPrefix(byte[] left, int leftOffset, int leftLength, byte[] right, int rightOffset, int rightLength) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while (result < length && left[leftOffset + result] == right[rightOffset + result]) { - result++; - } - - return result; + return Bytes.findCommonPrefix(left, right, leftLength, rightLength, leftOffset, rightOffset); } /** @@ -765,17 +871,8 @@ public static int findCommonPrefix(byte[] left, int leftOffset, int leftLength, */ public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, int rightOffset, int rightLength) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while ( - result < length && ByteBufferUtils.toByte(left, leftOffset + result) - == ByteBufferUtils.toByte(right, rightOffset + result) - ) { - result++; - } - - return result; + return CommonPrefixerHolder.BEST_COMMON_PREFIXER.findCommonPrefix(left, leftOffset, leftLength, + right, rightOffset, rightLength); } /** @@ -789,17 +886,8 @@ public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLeng */ public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, int rightOffset, int rightLength) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while ( - result < length - && ByteBufferUtils.toByte(left, leftOffset + result) == right[rightOffset + result] - ) { - result++; - } - - return result; + return CommonPrefixerHolder.BEST_COMMON_PREFIXER.findCommonPrefix(left, leftOffset, leftLength, + right, rightOffset, rightLength); } /** @@ -972,6 +1060,43 @@ static int compareToUnsafe(Object obj1, long o1, int l1, Object obj2, long o2, i return l1 - l2; } + static int findCommonPrefixUnsafe(Object left, long leftOffset, int leftLength, Object right, + long rightOffset, int rightLength) { + final int stride = 8; + final int minLength = Math.min(leftLength, rightLength); + int strideLimit = minLength & ~(stride - 1); + int result = 0; + int i; + + for (i = 0; i < strideLimit; i += stride) { + long lw = HBasePlatformDependent.getLong(left, leftOffset + (long) i); + long rw = HBasePlatformDependent.getLong(right, rightOffset + (long) i); + + if (lw != rw) { + if (!UnsafeAccess.LITTLE_ENDIAN) { + return result + (Long.numberOfLeadingZeros(lw ^ rw) / Bytes.SIZEOF_LONG); + } else { + return result + (Long.numberOfTrailingZeros(lw ^ rw) / Bytes.SIZEOF_LONG); + } + } else { + result += Bytes.SIZEOF_LONG; + } + } + + // The epilogue to cover the last (minLength % stride) elements. + for (; i < minLength; i++) { + byte il = HBasePlatformDependent.getByte(left, leftOffset + i); + byte ir = HBasePlatformDependent.getByte(right, rightOffset + i); + if (il != ir) { + return result; + } else { + result++; + } + } + + return result; + } + /** * Reads a short value at the given buffer's offset. 
* @param buffer input byte buffer to read diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 0203cc390fe8..96b3dbd4a8a5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -1179,6 +1179,11 @@ static abstract class Converter { } + static abstract class CommonPrefixer { + abstract int findCommonPrefix(byte[] left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength); + } + static Comparer lexicographicalComparerJavaImpl() { return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; } @@ -1453,6 +1458,99 @@ public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, i } } + static class CommonPrefixerHolder { + static final String UNSAFE_COMMON_PREFIXER_NAME = + CommonPrefixerHolder.class.getName() + "$UnsafeCommonPrefixer"; + + static final CommonPrefixer BEST_COMMON_PREFIXER = getBestCommonPrefixer(); + + static CommonPrefixer getBestCommonPrefixer() { + try { + Class theClass = + Class.forName(UNSAFE_COMMON_PREFIXER_NAME).asSubclass(CommonPrefixer.class); + + return theClass.getConstructor().newInstance(); + } catch (Throwable t) { // ensure we really catch *everything* + return CommonPrefixerHolder.PureJavaCommonPrefixer.INSTANCE; + } + } + + static final class PureJavaCommonPrefixer extends CommonPrefixer { + static final PureJavaCommonPrefixer INSTANCE = new PureJavaCommonPrefixer(); + + private PureJavaCommonPrefixer() { + } + + @Override + public int findCommonPrefix(byte[] left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + int length = Math.min(leftLength, rightLength); + int result = 0; + + while (result < length && left[leftOffset + result] == right[rightOffset + result]) { + result++; + } + return result; + } + } + + static final class UnsafeCommonPrefixer extends CommonPrefixer { + + static { + if (!UNSAFE_UNALIGNED) { + throw new Error(); + } + + // sanity check - this should never fail + if (HBasePlatformDependent.arrayIndexScale(byte[].class) != 1) { + throw new AssertionError(); + } + } + + public UnsafeCommonPrefixer() { + } + + @Override + public int findCommonPrefix(byte[] left, int leftOffset, int leftLength, byte[] right, + int rightOffset, int rightLength) { + final int stride = 8; + final int minLength = Math.min(leftLength, rightLength); + int strideLimit = minLength & ~(stride - 1); + final long leftOffsetAdj = leftOffset + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET; + final long rightOffsetAdj = rightOffset + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET; + int result = 0; + int i; + + for (i = 0; i < strideLimit; i += stride) { + long lw = HBasePlatformDependent.getLong(left, leftOffsetAdj + i); + long rw = HBasePlatformDependent.getLong(right, rightOffsetAdj + i); + if (lw != rw) { + if (!UnsafeAccess.LITTLE_ENDIAN) { + return result + (Long.numberOfLeadingZeros(lw ^ rw) / Bytes.SIZEOF_LONG); + } else { + return result + (Long.numberOfTrailingZeros(lw ^ rw) / Bytes.SIZEOF_LONG); + } + } else { + result += Bytes.SIZEOF_LONG; + } + } + + // The epilogue to cover the last (minLength % stride) elements. + for (; i < minLength; i++) { + int il = (left[leftOffset + i]); + int ir = (right[rightOffset + i]); + if (il != ir) { + return result; + } else { + result++; + } + } + + return result; + } + } + } + /** * Lexicographically determine the equality of two arrays. 
* @param left left operand @@ -2429,12 +2527,7 @@ public static int searchDelimiterIndexInReverse(final byte[] b, final int offset public static int findCommonPrefix(byte[] left, byte[] right, int leftLength, int rightLength, int leftOffset, int rightOffset) { - int length = Math.min(leftLength, rightLength); - int result = 0; - - while (result < length && left[leftOffset + result] == right[rightOffset + result]) { - result++; - } - return result; + return CommonPrefixerHolder.BEST_COMMON_PREFIXER.findCommonPrefix(left, leftOffset, leftLength, + right, rightOffset, rightLength); } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java index c824e01e4256..eabfed2042ca 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java @@ -606,6 +606,37 @@ public void testEquals() { assertTrue(ByteBufferUtils.equals(bb, 0, a.length, a, 0, a.length)); } + @Test + public void testFindCommonPrefix() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + ByteBuffer bb3 = ByteBuffer.allocateDirect(135); + byte[] b = new byte[71]; + + fillBB(bb1, (byte) 5); + fillBB(bb2, (byte) 5); + fillBB(bb3, (byte) 5); + fillArray(b, (byte) 5); + + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + assertEquals(71, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb3, 0, bb3.remaining())); + assertEquals(71, ByteBufferUtils.findCommonPrefix(bb3, 0, bb3.remaining(), b, 0, b.length)); + + b[13] = 9; + assertEquals(13, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + + bb2.put(134, (byte) 6); + assertEquals(134, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + + bb2.put(6, (byte) 4); + assertEquals(6, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + } + private static void fillBB(ByteBuffer bb, byte b) { for (int i = bb.position(); i < bb.limit(); i++) { bb.put(i, b); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index 14be2f4cc37a..b74348959982 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -585,4 +585,47 @@ public void testToFromHex() { assertArrayEquals(testData, result); } } + + @Test + public void testFindCommonPrefix() throws Exception { + testFindCommonPrefix(false); + } + + @Test + public void testFindCommonPrefixUnsafe() throws Exception { + testFindCommonPrefix(true); + } + + private static void testFindCommonPrefix(boolean unsafe) throws Exception { + setUnsafe(unsafe); + try { + // tests for common prefixes less than 8 bytes in length (i.e. 
using non-vectorized path) + byte[] hello = Bytes.toBytes("hello"); + byte[] helloWorld = Bytes.toBytes("helloworld"); + + assertEquals(5, + Bytes.findCommonPrefix(hello, helloWorld, hello.length, helloWorld.length, 0, 0)); + assertEquals(5, Bytes.findCommonPrefix(hello, hello, hello.length, hello.length, 0, 0)); + assertEquals(3, + Bytes.findCommonPrefix(hello, hello, hello.length - 2, hello.length - 2, 2, 2)); + assertEquals(0, Bytes.findCommonPrefix(hello, hello, 0, 0, 0, 0)); + + // tests for common prefixes greater than 8 bytes in length which may use the vectorized path + byte[] hellohello = Bytes.toBytes("hellohello"); + byte[] hellohellohi = Bytes.toBytes("hellohellohi"); + + assertEquals(10, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + assertEquals(10, Bytes.findCommonPrefix(hellohellohi, hellohello, hellohellohi.length, + hellohello.length, 0, 0)); + assertEquals(10, + Bytes.findCommonPrefix(hellohello, hellohello, hellohello.length, hellohello.length, 0, 0)); + + hellohello[2] = 0; + assertEquals(2, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + } finally { + setUnsafe(HBasePlatformDependent.unaligned()); + } + } } From 3f2e655577390d54e312013d8f2446f5e8f48791 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Mon, 21 Aug 2023 23:34:13 +0530 Subject: [PATCH 056/514] HBASE-28032 Fix ChaosMonkey documentation code block rendering (#5359) Signed-off-by: Peter Somogyi --- src/main/asciidoc/_chapters/developer.adoc | 40 +++++++++++++++++----- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index ea327fb3e253..ec3c4659bf09 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1807,23 +1807,23 @@ Chaos monkey can be run without SSH using the Chaos service and ZNode cluster ma with many cluster managers, available in the `hbase-it/src/test/java/org/apache/hadoop/hbase/` directory. Set the following property in hbase configuration to switch to `ZNodeClusterManager`: -`hbase.it.clustermanager.class=org.apache.hadoop.hbase.ZNodeClusterManager` +[source,xml] +---- + + hbase.it.clustermanager.class + org.apache.hadoop.hbase.ZNodeClusterManager + +---- Start chaos agent on all hosts where you want to test chaos scenarios. [source,bash] ---- $ bin/hbase org.apache.hadoop.hbase.chaos.ChaosService -c start +---- Start chaos monkey runner from any one host, preferrably an edgenode. -An example log while running chaos monkey with default policy PeriodicRandomActionPolicy is shown below. -Command Options: - -c Name of extra configurations file to find on CLASSPATH - -m,--monkey Which chaos monkey to run - -monkeyProps The properties file for specifying chaos monkey properties. - -tableName Table name in the test to run chaos monkey against - -familyName Family name in the test to run chaos monkey against - +An example log while running chaos monkey with default policy `PeriodicRandomActionPolicy` is as shown below: [source,bash] ---- $ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner @@ -1858,6 +1858,28 @@ Number of requests: 118645157 Number of regions: 2654 Number of regions in transition: 0 INFO [ChaosMonkey-3] policies.Policy: Sleeping for 89614 ms +---- + +For info on more customisations we can see help for the `ChaosMonkeyRunner`. For example we can pass the table name on which the chaos operations to be performed etc. 
+Below is the output of the help command, listing all the supported options. +[source,bash] +---- +$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner --help + +usage: hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner +Options: + -c Name of extra configurations file to find on CLASSPATH + -m,--monkey Which chaos monkey to run + -monkeyProps The properties file for specifying chaos monkey properties. + -tableName Table name in the test to run chaos monkey against + -familyName Family name in the test to run chaos monkey against +---- + +For example, running the following will start `ServerKillingMonkeyFactory` that chooses among actions to rolling batch restart RS, graceful rolling restart RS one at a time, restart active master, force balancer run etc. +[source,bash] +---- +$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner -m org.apache.hadoop.hbase.chaos.factories.ServerKillingMonkeyFactory +---- ==== Available Policies HBase ships with several ChaosMonkey policies, available in the From d309e99f0ab5a95b041aff009ef187d4e3e65daa Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Mon, 21 Aug 2023 23:43:53 +0530 Subject: [PATCH 057/514] HBASE-27966 HBase Master/RS JVM metrics populated incorrectly (#5323) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang Signed-off-by: Reid Chan --- .../org/apache/hadoop/hbase/io/MetricsIO.java | 21 +++- .../apache/hadoop/hbase/io/hfile/HFile.java | 10 +- .../hbase/regionserver/TestMetricsJvm.java | 113 ++++++++++++++++++ 3 files changed, 136 insertions(+), 8 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsJvm.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index c2197cef9457..58e6f7d01b71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.io; +import com.google.errorprone.annotations.RestrictedApi; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory; import org.apache.yetus.audience.InterfaceAudience; @@ -24,10 +25,13 @@ @InterfaceAudience.Private public class MetricsIO { + private static volatile MetricsIO instance; private final MetricsIOSource source; private final MetricsIOWrapper wrapper; - public MetricsIO(MetricsIOWrapper wrapper) { + @RestrictedApi(explanation = "Should only be called in TestMetricsIO", link = "", + allowedOnPath = ".*/(MetricsIO|TestMetricsIO).java") + MetricsIO(MetricsIOWrapper wrapper) { this(CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) .createIO(wrapper), wrapper); } @@ -37,6 +41,21 @@ public MetricsIO(MetricsIOWrapper wrapper) { this.wrapper = wrapper; } + /** + * Get a static instance for the MetricsIO so that accessors access the same instance. We want to + * lazy initialize so that correct process name is in place. See HBASE-27966 for more details. 
+ */ + public static MetricsIO getInstance() { + if (instance == null) { + synchronized (MetricsIO.class) { + if (instance == null) { + instance = new MetricsIO(new MetricsIOWrapperImpl()); + } + } + } + return instance; + } + public MetricsIOSource getMetricsSource() { return source; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 73346e8ae4ac..207c99866511 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.MetricsIO; -import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; @@ -168,9 +167,6 @@ public final class HFile { // For tests. Gets incremented when we read a block whether from HDFS or from Cache. public static final LongAdder DATABLOCK_READ_COUNT = new LongAdder(); - /** Static instance for the metrics so that HFileReaders access the same instance */ - static final MetricsIO metrics = new MetricsIO(new MetricsIOWrapperImpl()); - /** * Shutdown constructor. */ @@ -193,14 +189,14 @@ public static final long getChecksumFailuresCount() { public static final void updateReadLatency(long latencyMillis, boolean pread) { if (pread) { - metrics.updateFsPreadTime(latencyMillis); + MetricsIO.getInstance().updateFsPreadTime(latencyMillis); } else { - metrics.updateFsReadTime(latencyMillis); + MetricsIO.getInstance().updateFsReadTime(latencyMillis); } } public static final void updateWriteLatency(long latencyMillis) { - metrics.updateFsWriteTime(latencyMillis); + MetricsIO.getInstance().updateFsWriteTime(latencyMillis); } /** API required to write an {@link HFile} */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsJvm.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsJvm.java new file mode 100644 index 000000000000..6c1ec1dc0acb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsJvm.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static junit.framework.TestCase.assertFalse; +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.MetricsTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.util.EntityUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MetricsTests.class, SmallTests.class }) +public class TestMetricsJvm { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMetricsJvm.class); + + private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static Configuration conf; + + @BeforeClass + public static void before() throws Exception { + conf = UTIL.getConfiguration(); + // The master info server does not run in tests by default. + // Set it to ephemeral port so that it will start + conf.setInt(HConstants.MASTER_INFO_PORT, 0); + UTIL.startMiniCluster(); + } + + @AfterClass + public static void after() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testJvmMetrics() throws Exception { + final Pair jmxPage = getJmxPage("?qry=Hadoop:service=HBase,name=JvmMetrics*"); + assertNotNull(jmxPage); + + final Integer responseCode = jmxPage.getFirst(); + final String responseBody = jmxPage.getSecond(); + + assertEquals(HttpURLConnection.HTTP_OK, responseCode.intValue()); + assertNotNull(responseBody); + + assertNotFind("\"tag.ProcessName\"\\s*:\\s*\"IO\"", responseBody); + assertReFind("\"tag.ProcessName\"\\s*:\\s*\"Master\"", responseBody); + } + + private Pair getJmxPage(String query) throws Exception { + URL url = new URL("http://localhost:" + + UTIL.getHBaseCluster().getMaster().getInfoServer().getPort() + "/jmx" + query); + return getUrlContent(url); + } + + private Pair getUrlContent(URL url) throws Exception { + try (CloseableHttpClient client = HttpClients.createDefault()) { + CloseableHttpResponse resp = client.execute(new HttpGet(url.toURI())); + int code = resp.getStatusLine().getStatusCode(); + if (code == HttpURLConnection.HTTP_OK) { + return new Pair<>(code, EntityUtils.toString(resp.getEntity())); + } + return new Pair<>(code, null); + } + } + + private void assertReFind(String re, String value) { + Pattern p = Pattern.compile(re); + Matcher m = p.matcher(value); + assertTrue("'" + p + "' does not match " + value, m.find()); + } + + private void assertNotFind(String re, String value) { + Pattern p = Pattern.compile(re); + Matcher m = p.matcher(value); + assertFalse("'" + p + "' should not match " + value, m.find()); + } +} From 94ed6add066932b718176fc20a4553b835cf23cf Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 23 Aug 2023 10:17:21 +0100 Subject: 
[PATCH 058/514] HBASE-28004 Persistent cache map can get corrupt if crash happens midway through the write (#5341) Signed-off-by: Ankit Singhal Reviewed-by: Rahul Agarkar --- .../protobuf/server/io/BucketCacheEntry.proto | 3 + .../hadoop/hbase/io/hfile/CacheConfig.java | 2 - .../hbase/io/hfile/HFilePreadReader.java | 41 +++- .../hbase/io/hfile/PrefetchExecutor.java | 89 +------- .../hbase/io/hfile/bucket/BucketCache.java | 167 +++++++++++--- .../hbase/io/hfile/bucket/BucketEntry.java | 28 ++- .../io/hfile/bucket/BucketProtoUtils.java | 10 +- .../hbase/io/hfile/bucket/FileIOEngine.java | 37 ++- .../TestBlockEvictionOnRegionMovement.java | 1 - .../hbase/io/hfile/TestPrefetchRSClose.java | 4 - .../io/hfile/TestPrefetchWithBucketCache.java | 211 ++++++++++++++++++ .../io/hfile/bucket/TestBucketCache.java | 84 ++++--- .../bucket/TestBucketCachePersister.java | 9 +- .../hfile/bucket/TestByteBufferIOEngine.java | 2 +- .../hfile/bucket/TestPrefetchPersistence.java | 9 +- .../hbase/io/hfile/bucket/TestRAMCache.java | 2 +- .../bucket/TestVerifyBucketCacheFile.java | 102 +++++++-- 17 files changed, 601 insertions(+), 200 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java diff --git a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto index 038c6ca3f04d..ae1980fe51e6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto @@ -32,6 +32,7 @@ message BucketCacheEntry { map deserializers = 4; required BackingMap backing_map = 5; optional bytes checksum = 6; + map prefetched_files = 7; } message BackingMap { @@ -71,6 +72,8 @@ message BucketEntry { required int64 access_counter = 3; required int32 deserialiser_index = 4; required BlockPriority priority = 5; + required int64 cachedTime = 6; + optional int32 disk_size_with_header = 7; } enum BlockPriority { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 15c64c03d5e5..57f91fa19f44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -93,8 +93,6 @@ public class CacheConfig { public static final String DROP_BEHIND_CACHE_COMPACTION_KEY = "hbase.hfile.drop.behind.compaction"; - public static final String PREFETCH_PERSISTENCE_PATH_KEY = "hbase.prefetch.file.list.path"; - /** * Configuration key to set interval for persisting bucket cache to disk. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 2c71ce9f4842..f9c0ae592424 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -18,9 +18,13 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; +import java.util.Optional; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,8 +39,14 @@ public class HFilePreadReader extends HFileReaderImpl { public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); + final MutableBoolean fileAlreadyCached = new MutableBoolean(false); + BucketCache.getBuckedCacheFromCacheConfig(cacheConf).ifPresent(bc -> fileAlreadyCached + .setValue(bc.getFullyCachedFiles().get(path.getName()) == null ? false : true)); // Prefetch file blocks upon open if requested - if (cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff()) { + if ( + cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() + && !fileAlreadyCached.booleanValue() + ) { PrefetchExecutor.request(path, new Runnable() { @Override public void run() { @@ -55,12 +65,36 @@ public void run() { if (LOG.isTraceEnabled()) { LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end)); } + Optional bucketCacheOptional = + BucketCache.getBuckedCacheFromCacheConfig(cacheConf); // Don't use BlockIterator here, because it's designed to read load-on-open section. long onDiskSizeOfNextBlock = -1; while (offset < end) { if (Thread.interrupted()) { break; } + // BucketCache can be persistent and resilient to restarts, so we check first if the + // block exists on its in-memory index, if so, we just update the offset and move on + // to the next block without actually going read all the way to the cache. + if (bucketCacheOptional.isPresent()) { + BucketCache cache = bucketCacheOptional.get(); + BlockCacheKey cacheKey = new BlockCacheKey(name, offset); + BucketEntry entry = cache.getBackingMap().get(cacheKey); + if (entry != null) { + cacheKey = new BlockCacheKey(name, offset); + entry = cache.getBackingMap().get(cacheKey); + if (entry == null) { + LOG.debug("No cache key {}, we'll read and cache it", cacheKey); + } else { + offset += entry.getOnDiskSizeWithHeader(); + LOG.debug("Found cache key {}. Skipping prefetch, the block is already cached.", + cacheKey); + continue; + } + } else { + LOG.debug("No entry in the backing map for cache key {}", cacheKey); + } + } // Perhaps we got our block from cache? 
Unlikely as this may be, if it happens, then // the internal-to-hfileblock thread local which holds the overread that gets the // next header, will not have happened...so, pass in the onDiskSize gotten from the @@ -77,12 +111,15 @@ public void run() { block.release(); } } + BucketCache.getBuckedCacheFromCacheConfig(cacheConf) + .ifPresent(bc -> bc.fileCacheCompleted(path.getName())); + } catch (IOException e) { // IOExceptions are probably due to region closes (relocation, etc.) if (LOG.isTraceEnabled()) { LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e); } - } catch (Exception e) { + } catch (Throwable e) { // Other exceptions are interesting LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index d3064e066a12..02fbc12e85c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -17,11 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.Future; @@ -42,8 +37,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.PersistentPrefetchProtos; - @InterfaceAudience.Private public final class PrefetchExecutor { @@ -51,16 +44,12 @@ public final class PrefetchExecutor { /** Futures for tracking block prefetch activity */ private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); - /** Set of files for which prefetch is completed */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_SHOULD_BE_FINAL") - private static HashMap prefetchCompleted = new HashMap<>(); /** Executor pool shared among all HFiles for block prefetch */ private static final ScheduledExecutorService prefetchExecutorPool; /** Delay before beginning prefetch */ private static final int prefetchDelayMillis; /** Variation in prefetch delay times, to mitigate stampedes */ private static final float prefetchDelayVariation; - static String prefetchedFileListPath; static { // Consider doing this on demand with a configuration passed in rather // than in a static initializer. 
@@ -90,13 +79,6 @@ public Thread newThread(Runnable r) { + HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + Path.SEPARATOR_CHAR + ")"); public static void request(Path path, Runnable runnable) { - if (prefetchCompleted != null) { - if (isFilePrefetched(path.getName())) { - LOG.info( - "File has already been prefetched before the restart, so skipping prefetch : " + path); - return; - } - } if (!prefetchPathExclude.matcher(path.toString()).find()) { long delay; if (prefetchDelayMillis > 0) { @@ -122,8 +104,9 @@ public static void request(Path path, Runnable runnable) { public static void complete(Path path) { prefetchFutures.remove(path); - prefetchCompleted.put(path.getName(), true); - LOG.debug("Prefetch completed for {}", path.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Prefetch completed for {}", path.getName()); + } } public static void cancel(Path path) { @@ -134,8 +117,6 @@ public static void cancel(Path path) { prefetchFutures.remove(path); LOG.debug("Prefetch cancelled for {}", path); } - LOG.debug("Removing filename from the prefetched persistence list: {}", path.getName()); - removePrefetchedFileWhileEvict(path.getName()); } public static boolean isCompleted(Path path) { @@ -146,70 +127,6 @@ public static boolean isCompleted(Path path) { return true; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", - justification = "false positive, try-with-resources ensures close is called.") - public static void persistToFile(String path) throws IOException { - prefetchedFileListPath = path; - if (prefetchedFileListPath == null) { - LOG.info("Exception while persisting prefetch!"); - throw new IOException("Error persisting prefetched HFiles set!"); - } - if (!prefetchCompleted.isEmpty()) { - try (FileOutputStream fos = new FileOutputStream(prefetchedFileListPath, false)) { - PrefetchProtoUtils.toPB(prefetchCompleted).writeDelimitedTo(fos); - } - } - } - - public static void retrieveFromFile(String path) throws IOException { - prefetchedFileListPath = path; - File prefetchPersistenceFile = new File(prefetchedFileListPath); - if (!prefetchPersistenceFile.exists()) { - LOG.warn("Prefetch persistence file does not exist!"); - return; - } - LOG.info("Retrieving from prefetch persistence file " + path); - assert (prefetchedFileListPath != null); - try (FileInputStream fis = deleteFileOnClose(prefetchPersistenceFile)) { - PersistentPrefetchProtos.PrefetchedHfileName proto = - PersistentPrefetchProtos.PrefetchedHfileName.parseDelimitedFrom(fis); - Map protoPrefetchedFilesMap = proto.getPrefetchedFilesMap(); - prefetchCompleted.putAll(protoPrefetchedFilesMap); - } - } - - private static FileInputStream deleteFileOnClose(final File file) throws IOException { - return new FileInputStream(file) { - private File myFile; - - private FileInputStream init(File file) { - myFile = file; - return this; - } - - @Override - public void close() throws IOException { - if (myFile == null) { - return; - } - - super.close(); - if (!myFile.delete()) { - throw new IOException("Failed deleting persistence file " + myFile.getAbsolutePath()); - } - myFile = null; - } - }.init(file); - } - - public static void removePrefetchedFileWhileEvict(String hfileName) { - prefetchCompleted.remove(hfileName); - } - - public static boolean isFilePrefetched(String hfileName) { - return prefetchCompleted.containsKey(hfileName); - } - private PrefetchExecutor() { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 14c4c44ee16f..bc5e7e7c9b9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.apache.hadoop.hbase.io.hfile.CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY; -import static org.apache.hadoop.hbase.io.hfile.CacheConfig.PREFETCH_PERSISTENCE_PATH_KEY; import java.io.File; import java.io.FileInputStream; @@ -32,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Optional; import java.util.PriorityQueue; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; @@ -52,6 +52,7 @@ import java.util.function.Function; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -62,12 +63,13 @@ import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil; import org.apache.hadoop.hbase.io.hfile.BlockPriority; import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.CachedBlock; +import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache; import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; @@ -143,8 +145,14 @@ public class BucketCache implements BlockCache, HeapSize { // Store the block in this map before writing it to cache transient final RAMCache ramCache; + // In this map, store the block's meta data like offset, length - transient ConcurrentHashMap backingMap; + transient Map backingMap; + + /** Set of files for which prefetch is completed */ + final Map fullyCachedFiles = new ConcurrentHashMap<>(); + + private BucketCachePersister cachePersister; /** * Flag if the cache is enabled or not... 
We shut it off if there are IO errors for some time, so @@ -177,9 +185,6 @@ public class BucketCache implements BlockCache, HeapSize { private static final int DEFAULT_CACHE_WAIT_TIME = 50; private final BucketCacheStats cacheStats = new BucketCacheStats(); - - /** BucketCache persister thread */ - private BucketCachePersister cachePersister; private final String persistencePath; static AtomicBoolean isCacheInconsistent = new AtomicBoolean(false); private final long cacheCapacity; @@ -239,8 +244,6 @@ public class BucketCache implements BlockCache, HeapSize { /** In-memory bucket size */ private float memoryFactor; - private String prefetchedFileListPath; - private long bucketcachePersistInterval; private static final String FILE_VERIFY_ALGORITHM = @@ -293,7 +296,6 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck this.memoryFactor = conf.getFloat(MEMORY_FACTOR_CONFIG_NAME, DEFAULT_MEMORY_FACTOR); this.queueAdditionWaitTime = conf.getLong(QUEUE_ADDITION_WAIT_TIME, DEFAULT_QUEUE_ADDITION_WAIT_TIME); - this.prefetchedFileListPath = conf.get(PREFETCH_PERSISTENCE_PATH_KEY); this.bucketcachePersistInterval = conf.getLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, 1000); sanityCheckConfigs(); @@ -320,11 +322,15 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck this.backingMap = new ConcurrentHashMap<>((int) blockNumCapacity); - if (ioEngine.isPersistent() && persistencePath != null) { - startBucketCachePersisterThread(); + if (isCachePersistent()) { + if (ioEngine instanceof FileIOEngine) { + startBucketCachePersisterThread(); + } try { retrieveFromFile(bucketSizes); } catch (IOException ioex) { + backingMap.clear(); + fullyCachedFiles.clear(); LOG.error("Can't restore from file[" + persistencePath + "] because of ", ioex); } } @@ -429,7 +435,7 @@ private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String } public boolean isCachePersistenceEnabled() { - return (prefetchedFileListPath != null) && (persistencePath != null); + return persistencePath != null; } /** @@ -504,8 +510,8 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach } LOG.trace("Caching key={}, item={}", cacheKey, cachedItem); // Stuff the entry into the RAM cache so it can get drained to the persistent store - RAMQueueEntry re = - new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); + RAMQueueEntry re = new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), + inMemory, isCachePersistent() && ioEngine instanceof FileIOEngine); /** * Don't use ramCache.put(cacheKey, re) here. because there may be a existing entry with same * key in ramCache, the heap size of bucket cache need to update if replacing entry from @@ -589,6 +595,12 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, } return cachedBlock; } + } catch (HBaseIOException hioex) { + // When using file io engine persistent cache, + // the cache map state might differ from the actual cache. 
If we reach this block, + // we should remove the cache key entry from the backing map + backingMap.remove(key); + LOG.debug("Failed to fetch block for cache key: {}.", key, hioex); } catch (IOException ioex) { LOG.error("Failed reading block " + key + " from bucket cache", ioex); checkIOErrorIsTolerated(); @@ -616,13 +628,15 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary()); } if (ioEngine.isPersistent()) { - if (prefetchedFileListPath != null) { - PrefetchExecutor.removePrefetchedFileWhileEvict(cacheKey.getHfileName()); - } + fullyCachedFiles.remove(cacheKey.getHfileName()); setCacheInconsistent(true); } } + public void fileCacheCompleted(String fileName) { + fullyCachedFiles.put(fileName, true); + } + /** * Free the {{@link BucketEntry} actually,which could only be invoked when the * {@link BucketEntry#refCnt} becoming 0. @@ -1252,18 +1266,24 @@ static List getRAMQueueEntries(BlockingQueue q, @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.") void persistToFile() throws IOException { - if (!ioEngine.isPersistent()) { + if (!isCachePersistent()) { throw new IOException("Attempt to persist non-persistent cache mappings!"); } - try (FileOutputStream fos = new FileOutputStream(persistencePath, false)) { + File tempPersistencePath = new File(persistencePath + EnvironmentEdgeManager.currentTime()); + try (FileOutputStream fos = new FileOutputStream(tempPersistencePath, false)) { fos.write(ProtobufMagic.PB_MAGIC); BucketProtoUtils.toPB(this).writeDelimitedTo(fos); } - if (prefetchedFileListPath != null) { - PrefetchExecutor.persistToFile(prefetchedFileListPath); + if (!tempPersistencePath.renameTo(new File(persistencePath))) { + LOG.warn("Failed to commit cache persistent file. We might lose cached blocks if " + + "RS crashes/restarts before we successfully checkpoint again."); } } + private boolean isCachePersistent() { + return ioEngine.isPersistent() && persistencePath != null; + } + /** * @see #persistToFile() */ @@ -1273,9 +1293,6 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { return; } assert !cacheEnabled; - if (prefetchedFileListPath != null) { - PrefetchExecutor.retrieveFromFile(prefetchedFileListPath); - } try (FileInputStream in = deleteFileOnClose(persistenceFile)) { int pblen = ProtobufMagic.lengthOfPBMagic(); @@ -1358,16 +1375,37 @@ private void verifyCapacityAndClasses(long capacitySize, String ioclass, String } private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException { + backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), + this::createRecycler); + fullyCachedFiles.clear(); + fullyCachedFiles.putAll(proto.getPrefetchedFilesMap()); if (proto.hasChecksum()) { - ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), - algorithm); + try { + ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), + algorithm); + } catch (IOException e) { + LOG.warn("Checksum for cache file failed. " + + "We need to validate each cache key in the backing map. 
This may take some time..."); + long startTime = EnvironmentEdgeManager.currentTime(); + int totalKeysOriginally = backingMap.size(); + for (Map.Entry keyEntry : backingMap.entrySet()) { + try { + ((FileIOEngine) ioEngine).checkCacheTime(keyEntry.getValue()); + } catch (IOException e1) { + LOG.debug("Check for key {} failed. Removing it from map.", keyEntry.getKey()); + backingMap.remove(keyEntry.getKey()); + fullyCachedFiles.remove(keyEntry.getKey().getHfileName()); + } + } + LOG.info("Finished validating {} keys in the backing map. Recovered: {}. This took {}ms.", + totalKeysOriginally, backingMap.size(), + (EnvironmentEdgeManager.currentTime() - startTime)); + } } else { // if has not checksum, it means the persistence file is old format LOG.info("Persistent file is old format, it does not support verifying file integrity!"); } verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), proto.getMapClass()); - backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), - this::createRecycler); } /** @@ -1403,6 +1441,7 @@ private void disableCache() { if (!ioEngine.isPersistent() || persistencePath == null) { // If persistent ioengine and a path, we will serialize out the backingMap. this.backingMap.clear(); + this.fullyCachedFiles.clear(); } } @@ -1417,7 +1456,9 @@ public void shutdown() { LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write=" + persistencePath); if (ioEngine.isPersistent() && persistencePath != null) { - cachePersister.interrupt(); + if (cachePersister != null) { + cachePersister.interrupt(); + } try { join(); persistToFile(); @@ -1429,6 +1470,17 @@ public void shutdown() { } } + /** + * Needed mostly for UTs that might run in the same VM and create different BucketCache instances + * on different UT methods. + */ + @Override + protected void finalize() { + if (cachePersister != null && !cachePersister.isInterrupted()) { + cachePersister.interrupt(); + } + } + @Override public CacheStats getStats() { return cacheStats; @@ -1485,7 +1537,7 @@ protected String getAlgorithm() { */ @Override public int evictBlocksByHfileName(String hfileName) { - PrefetchExecutor.removePrefetchedFileWhileEvict(hfileName); + this.fullyCachedFiles.remove(hfileName); Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); @@ -1556,12 +1608,15 @@ static class RAMQueueEntry { private final Cacheable data; private long accessCounter; private boolean inMemory; + private boolean isCachePersistent; - RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessCounter, boolean inMemory) { + RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessCounter, boolean inMemory, + boolean isCachePersistent) { this.key = bck; this.data = data; this.accessCounter = accessCounter; this.inMemory = inMemory; + this.isCachePersistent = isCachePersistent; } public Cacheable getData() { @@ -1591,12 +1646,19 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a if (len == 0) { return null; } + if (isCachePersistent && data instanceof HFileBlock) { + len += Long.BYTES; // we need to record the cache time for consistency check in case of + // recovery + } long offset = alloc.allocateBlock(len); boolean succ = false; BucketEntry bucketEntry = null; try { - bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory, createRecycler, - getByteBuffAllocator()); + int diskSizeWithHeader = (data instanceof HFileBlock) + ? 
((HFileBlock) data).getOnDiskSizeWithHeader() + : data.getSerializedLength(); + bucketEntry = new BucketEntry(offset, len, diskSizeWithHeader, accessCounter, inMemory, + createRecycler, getByteBuffAllocator()); bucketEntry.setDeserializerReference(data.getDeserializer()); if (data instanceof HFileBlock) { // If an instance of HFileBlock, save on some allocations. @@ -1604,7 +1666,16 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a ByteBuff sliceBuf = block.getBufferReadOnly(); block.getMetaData(metaBuff); ioEngine.write(sliceBuf, offset); - ioEngine.write(metaBuff, offset + len - metaBuff.limit()); + // adds the cache time after the block and metadata part + if (isCachePersistent) { + ioEngine.write(metaBuff, offset + len - metaBuff.limit() - Long.BYTES); + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(bucketEntry.getCachedTime()); + buffer.rewind(); + ioEngine.write(buffer, (offset + len - Long.BYTES)); + } else { + ioEngine.write(metaBuff, offset + len - metaBuff.limit()); + } } else { // Only used for testing. ByteBuffer bb = ByteBuffer.allocate(len); @@ -1760,6 +1831,10 @@ float getMemoryFactor() { return memoryFactor; } + public String getPersistencePath() { + return persistencePath; + } + /** * Wrapped the delegate ConcurrentMap with maintaining its block's reference count. */ @@ -1837,4 +1912,28 @@ public void clear() { } } } + + public Map getBackingMap() { + return backingMap; + } + + public Map getFullyCachedFiles() { + return fullyCachedFiles; + } + + public static Optional getBuckedCacheFromCacheConfig(CacheConfig cacheConf) { + if (cacheConf.getBlockCache().isPresent()) { + BlockCache bc = cacheConf.getBlockCache().get(); + if (bc instanceof CombinedBlockCache) { + BlockCache l2 = ((CombinedBlockCache) bc).getSecondLevelCache(); + if (l2 instanceof BucketCache) { + return Optional.of((BucketCache) l2); + } + } else if (bc instanceof BucketCache) { + return Optional.of((BucketCache) bc); + } + } + return Optional.empty(); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index a04a32bfe645..c93dac8a572b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -43,13 +43,15 @@ * bytes gives us 256TB or so. */ @InterfaceAudience.Private -class BucketEntry implements HBaseReferenceCounted { +public class BucketEntry implements HBaseReferenceCounted { // access counter comparator, descending order static final Comparator COMPARATOR = Comparator.comparingLong(BucketEntry::getAccessCounter).reversed(); private int offsetBase; private int length; + + private int onDiskSizeWithHeader; private byte offset1; /** @@ -91,24 +93,32 @@ class BucketEntry implements HBaseReferenceCounted { /** * Time this block was cached. Presumes we are created just before we are added to the cache. */ - private final long cachedTime = System.nanoTime(); + private long cachedTime = System.nanoTime(); /** * @param createRecycler used to free this {@link BucketEntry} when {@link BucketEntry#refCnt} * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used * for test. 
*/ - BucketEntry(long offset, int length, long accessCounter, boolean inMemory, - Function createRecycler, ByteBuffAllocator allocator) { + BucketEntry(long offset, int length, int onDiskSizeWithHeader, long accessCounter, + boolean inMemory, Function createRecycler, ByteBuffAllocator allocator) { + this(offset, length, onDiskSizeWithHeader, accessCounter, System.nanoTime(), inMemory, + createRecycler, allocator); + } + + BucketEntry(long offset, int length, int onDiskSizeWithHeader, long accessCounter, + long cachedTime, boolean inMemory, Function createRecycler, + ByteBuffAllocator allocator) { if (createRecycler == null) { throw new IllegalArgumentException("createRecycler could not be null!"); } setOffset(offset); this.length = length; + this.onDiskSizeWithHeader = onDiskSizeWithHeader; this.accessCounter = accessCounter; + this.cachedTime = cachedTime; this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.MULTI; this.refCnt = RefCnt.create(createRecycler.apply(this)); - this.markedAsEvicted = new AtomicBoolean(false); this.allocator = allocator; } @@ -159,10 +169,14 @@ public BlockPriority getPriority() { return this.priority; } - long getCachedTime() { + public long getCachedTime() { return cachedTime; } + public int getOnDiskSizeWithHeader() { + return onDiskSizeWithHeader; + } + /** * The {@link BucketCache} will try to release its reference to this BucketEntry many times. we * must make sure the idempotent, otherwise it'll decrease the RPC's reference count in advance, @@ -239,7 +253,7 @@ public BucketEntry retain() { * also release its refCnt (case.1 will do this) and no other rpc reference, then it will free the * area in bucketAllocator.
* 3.evict those block without any rpc reference if cache size exceeded. we'll only free those - * blocks with zero rpc reference count, as the {@link BucketEntry#markStaleAsEvicted()} do. + * blocks with zero rpc reference count. * @return true to indicate we've decreased to zero and do the de-allocation. */ @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index ff4e90b88650..8830e5d3255a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -45,6 +45,7 @@ static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { .setIoClass(cache.ioEngine.getClass().getName()) .setMapClass(cache.backingMap.getClass().getName()) .putAllDeserializers(CacheableDeserializerIdManager.save()) + .putAllPrefetchedFiles(cache.fullyCachedFiles) .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) .setChecksum(ByteString .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) @@ -99,8 +100,10 @@ private static BucketCacheProtos.BlockType toPB(BlockType blockType) { private static BucketCacheProtos.BucketEntry toPB(BucketEntry entry) { return BucketCacheProtos.BucketEntry.newBuilder().setOffset(entry.offset()) - .setLength(entry.getLength()).setDeserialiserIndex(entry.deserializerIndex) - .setAccessCounter(entry.getAccessCounter()).setPriority(toPB(entry.getPriority())).build(); + .setCachedTime(entry.getCachedTime()).setLength(entry.getLength()) + .setDiskSizeWithHeader(entry.getOnDiskSizeWithHeader()) + .setDeserialiserIndex(entry.deserializerIndex).setAccessCounter(entry.getAccessCounter()) + .setPriority(toPB(entry.getPriority())).build(); } private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { @@ -128,7 +131,8 @@ static ConcurrentHashMap fromPB(Map // TODO:We use ByteBuffAllocator.HEAP here, because we could not get the ByteBuffAllocator // which created by RpcServer elegantly. 
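The cachedTime value that BucketProtoUtils now round-trips is what ties the checkpointed map to the bytes in the cache file: BucketCache.writeToCache (earlier hunk) appends an 8-byte cached-time trailer to each block when persistence is enabled, and FileIOEngine (next hunk) reads that trailer back, rejecting the block if it disagrees with the restored BucketEntry. A condensed sketch of both halves, reusing names from those hunks, with buffer release and error handling omitted:

    // Write side (inside writeToCache, when isCachePersistent): data, then metadata, then trailer.
    long trailerOffset = offset + len - Long.BYTES;
    ioEngine.write(metaBuff, trailerOffset - metaBuff.limit());
    ByteBuffer trailer = ByteBuffer.allocate(Long.BYTES);
    trailer.putLong(bucketEntry.getCachedTime());
    trailer.rewind();
    ioEngine.write(trailer, trailerOffset);

    // Read side (FileIOEngine): the trailer must match the entry restored from the checkpoint.
    ByteBuff buf = be.allocator.allocate(Long.BYTES);
    accessFile(readAccessor, buf, be.offset() + be.getLength() - Long.BYTES);
    buf.rewind();
    if (buf.getLong() != be.getCachedTime()) {
      throw new HBaseIOException("cached time trailer does not match bucket entry for " + be);
    }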
BucketEntry value = new BucketEntry(protoValue.getOffset(), protoValue.getLength(), - protoValue.getAccessCounter(), + protoValue.getDiskSizeWithHeader(), protoValue.getAccessCounter(), + protoValue.getCachedTime(), protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, ByteBuffAllocator.HEAP); // This is the deserializer that we stored diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index 370343b1b25c..38f9db04b6d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -26,6 +26,7 @@ import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -49,6 +50,7 @@ public class FileIOEngine extends PersistentIOEngine { private final long sizePerFile; private final long capacity; + private boolean maintainPersistence; private FileReadAccessor readAccessor = new FileReadAccessor(); private FileWriteAccessor writeAccessor = new FileWriteAccessor(); @@ -59,6 +61,7 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... filePa this.sizePerFile = capacity / filePaths.length; this.capacity = this.sizePerFile * filePaths.length; this.fileChannels = new FileChannel[filePaths.length]; + this.maintainPersistence = maintainPersistence; if (!maintainPersistence) { for (String filePath : filePaths) { File file = new File(filePath); @@ -145,10 +148,42 @@ public Cacheable read(BucketEntry be) throws IOException { throw ioe; } } - dstBuff.rewind(); + if (maintainPersistence) { + dstBuff.position(length - Long.BYTES); + long cachedNanoTime = dstBuff.getLong(); + if (be.getCachedTime() != cachedNanoTime) { + dstBuff.release(); + throw new HBaseIOException("The cached time recorded within the cached block differs " + + "from its bucket entry, so it might not be the same."); + } + dstBuff.rewind(); + dstBuff.limit(length - Long.BYTES); + dstBuff = dstBuff.slice(); + } else { + dstBuff.rewind(); + } return be.wrapAsCacheable(dstBuff); } + void checkCacheTime(BucketEntry be) throws IOException { + long offset = be.offset(); + int length = be.getLength(); + ByteBuff dstBuff = be.allocator.allocate(Long.BYTES); + try { + accessFile(readAccessor, dstBuff, (offset + length - Long.BYTES)); + } catch (IOException ioe) { + dstBuff.release(); + throw ioe; + } + dstBuff.rewind(); + long cachedNanoTime = dstBuff.getLong(); + if (be.getCachedTime() != cachedNanoTime) { + dstBuff.release(); + throw new HBaseIOException("The cached time recorded within the cached block differs " + + "from its bucket entry, so it might not be the same."); + } + } + void closeFileChannels() { for (FileChannel fileChannel : fileChannels) { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java index 66b2ca73ded8..eb3e3cc61f4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java @@ -79,7 +79,6 @@ public void setup() throws Exception { conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:" + testDir + "/bucket.cache"); conf.setInt("hbase.bucketcache.size", 400); conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence"); - conf.set(CacheConfig.PREFETCH_PERSISTENCE_PATH_KEY, testDir + "/prefetch.persistence"); conf.setLong(CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY, 100); conf.setBoolean(CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY, true); zkCluster = TEST_UTIL.startMiniZKCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java index b10186996ede..64db9158333d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java @@ -75,7 +75,6 @@ public void setup() throws Exception { conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:" + testDir + "/bucket.cache"); conf.setInt("hbase.bucketcache.size", 400); conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence"); - conf.set(CacheConfig.PREFETCH_PERSISTENCE_PATH_KEY, testDir + "/prefetch.persistence"); zkCluster = TEST_UTIL.startMiniZKCluster(); cluster = TEST_UTIL.startMiniHBaseCluster(option); assertEquals(2, cluster.getRegionServerThreads().size()); @@ -114,18 +113,15 @@ public void testPrefetchPersistence() throws Exception { // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files // should exist. assertTrue(new File(testDir + "/bucket.persistence").exists()); - assertTrue(new File(testDir + "/prefetch.persistence").exists()); // Stop the RS cluster.stopRegionServer(0); LOG.info("Stopped Region Server 0."); Thread.sleep(1000); assertTrue(new File(testDir + "/bucket.persistence").exists()); - assertTrue(new File(testDir + "/prefetch.persistence").exists()); // Start the RS and validate cluster.startRegionServer(); - assertFalse(new File(testDir + "/prefetch.persistence").exists()); assertFalse(new File(testDir + "/bucket.persistence").exists()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java new file mode 100644 index 000000000000..e4330308243d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; + +@Category({ IOTests.class, MediumTests.class }) +public class TestPrefetchWithBucketCache { + + private static final Logger LOG = LoggerFactory.getLogger(TestPrefetchWithBucketCache.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestPrefetchWithBucketCache.class); + + @Rule + public TestName name = new TestName(); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; + private static final int DATA_BLOCK_SIZE = 2048; + private static final int NUM_KV = 100; + + private Configuration conf; + private CacheConfig cacheConf; + private FileSystem fs; + private BlockCache blockCache; + + @Before + public void setUp() throws IOException { + conf = TEST_UTIL.getConfiguration(); + conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); + fs = HFileSystem.get(conf); + File testDir = new File(name.getMethodName()); + testDir.mkdir(); + conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:/" + testDir.getAbsolutePath() + "/bucket.cache"); + conf.setLong(BUCKET_CACHE_SIZE_KEY, 200); + blockCache = BlockCacheFactory.createBlockCache(conf); + cacheConf = new CacheConfig(conf, blockCache); + } + + @After + public void tearDown() { + File cacheFile = new File(name.getMethodName() + "/bucket.cache"); + File dir = new File(name.getMethodName()); + cacheFile.delete(); + dir.delete(); + } + + @Test + public void testPrefetchDoesntOverwork() throws Exception { + Path storeFile = writeStoreFile("TestPrefetchDoesntOverwork"); + // Prefetches the file blocks + LOG.debug("First read should prefetch the blocks."); + readStoreFile(storeFile); + BucketCache bc = BucketCache.getBuckedCacheFromCacheConfig(cacheConf).get(); + // Our file should have 6 DATA blocks. 
We should wait for all of them to be cached + Waiter.waitFor(conf, 300, () -> bc.getBackingMap().size() == 6); + Map snapshot = ImmutableMap.copyOf(bc.getBackingMap()); + // Reads file again and check we are not prefetching it again + LOG.debug("Second read, no prefetch should happen here."); + readStoreFile(storeFile); + // Makes sure the cache hasn't changed + snapshot.entrySet().forEach(e -> { + BucketEntry entry = bc.getBackingMap().get(e.getKey()); + assertNotNull(entry); + assertEquals(e.getValue().getCachedTime(), entry.getCachedTime()); + }); + // forcibly removes first block from the bc backing map, in order to cause it to be cached again + BlockCacheKey key = snapshot.keySet().stream().findFirst().get(); + LOG.debug("removing block {}", key); + bc.getBackingMap().remove(key); + bc.getFullyCachedFiles().remove(storeFile.getName()); + assertTrue(snapshot.size() > bc.getBackingMap().size()); + LOG.debug("Third read should prefetch again, as we removed one block for the file."); + readStoreFile(storeFile); + Waiter.waitFor(conf, 300, () -> snapshot.size() == bc.getBackingMap().size()); + assertTrue(snapshot.get(key).getCachedTime() < bc.getBackingMap().get(key).getCachedTime()); + } + + private void readStoreFile(Path storeFilePath) throws Exception { + readStoreFile(storeFilePath, (r, o) -> { + HFileBlock block = null; + try { + block = r.readBlock(o, -1, false, true, false, true, null, null); + } catch (IOException e) { + fail(e.getMessage()); + } + return block; + }, (key, block) -> { + boolean isCached = blockCache.getBlock(key, true, false, true) != null; + if ( + block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX + || block.getBlockType() == BlockType.INTERMEDIATE_INDEX + ) { + assertTrue(isCached); + } + }); + } + + private void readStoreFile(Path storeFilePath, + BiFunction readFunction, + BiConsumer validationFunction) throws Exception { + // Open the file + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); + + while (!reader.prefetchComplete()) { + // Sleep for a bit + Thread.sleep(1000); + } + long offset = 0; + while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { + HFileBlock block = readFunction.apply(reader, offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + validationFunction.accept(blockCacheKey, block); + offset += block.getOnDiskSizeWithHeader(); + } + } + + private Path writeStoreFile(String fname) throws IOException { + HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); + return writeStoreFile(fname, meta); + } + + private Path writeStoreFile(String fname, HFileContext context) throws IOException { + Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname); + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) + .withOutputDir(storeFileParentDir).withFileContext(context).build(); + Random rand = ThreadLocalRandom.current(); + final int rowLen = 32; + for (int i = 0; i < NUM_KV; ++i) { + byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i); + byte[] v = RandomKeyValueUtil.randomValue(rand); + int cfLen = rand.nextInt(k.length - rowLen + 1); + KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, + k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length); + sfw.append(kv); + } + + sfw.close(); + return sfw.getPath(); + } + + public static KeyValue.Type generateKeyType(Random rand) { + if (rand.nextBoolean()) { + // Let's make half 
of KVs puts. + return KeyValue.Type.Put; + } else { + KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; + if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) { + throw new RuntimeException("Generated an invalid key type: " + keyType + ". " + + "Probably the layout of KeyValue.Type has changed."); + } + return keyType; + } + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index ad381a665c3b..0cbafedc7c53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; @@ -298,12 +299,17 @@ public void testRetrieveFromFile() throws Exception { testRetrievalUtils(testDir, ioEngineName); int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; String persistencePath = testDir + "/bucket.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); - assertEquals(0, bucketCache.getAllocator().getUsedSize()); - assertEquals(0, bucketCache.backingMap.size()); - HBASE_TESTING_UTILITY.cleanupTestDir(); + BucketCache bucketCache = null; + try { + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + smallBucketSizes, writeThreads, writerQLen, persistencePath); + assertFalse(new File(persistencePath).exists()); + assertEquals(0, bucketCache.getAllocator().getUsedSize()); + assertEquals(0, bucketCache.backingMap.size()); + } finally { + bucketCache.shutdown(); + HBASE_TESTING_UTILITY.cleanupTestDir(); + } } @Test @@ -319,21 +325,28 @@ public void testRetrieveFromPMem() throws Exception { final String ioEngineName = "pmem:" + testDir + "/bucket.cache"; testRetrievalUtils(testDir, ioEngineName); int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; - String persistencePath = testDir + "/bucket.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); - assertEquals(0, bucketCache.getAllocator().getUsedSize()); - assertEquals(0, bucketCache.backingMap.size()); - HBASE_TESTING_UTILITY.cleanupTestDir(); + String persistencePath = testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime(); + BucketCache bucketCache = null; + try { + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + smallBucketSizes, writeThreads, writerQLen, persistencePath); + assertFalse(new File(persistencePath).exists()); + assertEquals(0, bucketCache.getAllocator().getUsedSize()); + assertEquals(0, bucketCache.backingMap.size()); + } finally { + bucketCache.shutdown(); + HBASE_TESTING_UTILITY.cleanupTestDir(); + } } private void testRetrievalUtils(Path testDir, String ioEngineName) throws IOException, InterruptedException { - final String persistencePath = 
testDir + "/bucket.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, persistencePath); + final String persistencePath = + testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime(); + BucketCache bucketCache = null; try { + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, persistencePath); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); @@ -353,7 +366,9 @@ private void testRetrievalUtils(Path testDir, String ioEngineName) assertFalse(new File(persistencePath).exists()); assertEquals(usedSize, bucketCache.getAllocator().getUsedSize()); } finally { - bucketCache.shutdown(); + if (bucketCache != null) { + bucketCache.shutdown(); + } } assertTrue(new File(persistencePath).exists()); } @@ -382,12 +397,17 @@ public void testRetrieveFromMultipleFiles() throws Exception { testRetrievalUtils(testDirInitial, ioEngineName); int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; String persistencePath = testDirInitial + "/bucket.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); - assertEquals(0, bucketCache.getAllocator().getUsedSize()); - assertEquals(0, bucketCache.backingMap.size()); - HBASE_TESTING_UTILITY.cleanupTestDir(); + BucketCache bucketCache = null; + try { + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + smallBucketSizes, writeThreads, writerQLen, persistencePath); + assertFalse(new File(persistencePath).exists()); + assertEquals(0, bucketCache.getAllocator().getUsedSize()); + assertEquals(0, bucketCache.backingMap.size()); + } finally { + bucketCache.shutdown(); + HBASE_TESTING_UTILITY.cleanupTestDir(); + } } @Test @@ -572,7 +592,7 @@ public void testOffsetProducesPositiveOutput() { // This number is picked because it produces negative output if the values isn't ensured to be // positive. See HBASE-18757 for more information. 
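A recurring change in TestBucketCache above and below is wrapping every BucketCache the test constructs in try/finally and always calling shutdown(), so a leaked persister thread or open cache file cannot bleed into the next test running in the same JVM (the new finalize() override in BucketCache is a second line of defence for that case). The pattern, sketched with the field names these tests already use:

    BucketCache bucketCache = null;
    try {
      bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
        constructedBlockSizes, writeThreads, writerQLen, persistencePath);
      // ... exercise and assert against bucketCache ...
    } finally {
      if (bucketCache != null) {
        // Stops the persister thread and, for persistent engines, checkpoints the backing map.
        bucketCache.shutdown();
      }
      HBASE_TESTING_UTILITY.cleanupTestDir();
    }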
long testValue = 549888460800L; - BucketEntry bucketEntry = new BucketEntry(testValue, 10, 10L, true, (entry) -> { + BucketEntry bucketEntry = new BucketEntry(testValue, 10, 10, 10L, true, (entry) -> { return ByteBuffAllocator.NONE; }, ByteBuffAllocator.HEAP); assertEquals(testValue, bucketEntry.offset()); @@ -701,8 +721,8 @@ public void testRAMCache() { HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP); HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, ByteBuffAllocator.HEAP); - RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false); - RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false); + RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, false); + RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, false); assertFalse(cache.containsKey(key1)); assertNull(cache.putIfAbsent(key1, re1)); @@ -749,7 +769,7 @@ public void testFreeBlockWhenIOEngineWriteFailure() throws IOException { BucketAllocator allocator = new BucketAllocator(availableSpace, null); BlockCacheKey key = new BlockCacheKey("dummy", 1L); - RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true); + RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, false); Assert.assertEquals(0, allocator.getUsedSize()); try { @@ -768,13 +788,14 @@ public void testFreeBlockWhenIOEngineWriteFailure() throws IOException { */ @Test public void testFreeBucketEntryRestoredFromFile() throws Exception { + BucketCache bucketCache = null; try { final Path dataTestDir = createAndGetTestDir(); String ioEngineName = "file:" + dataTestDir + "/bucketNoRecycler.cache"; String persistencePath = dataTestDir + "/bucketNoRecycler.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath); long usedByteSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedByteSize); @@ -809,19 +830,21 @@ public void testFreeBucketEntryRestoredFromFile() throws Exception { assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); } finally { + bucketCache.shutdown(); HBASE_TESTING_UTILITY.cleanupTestDir(); } } @Test public void testBlockAdditionWaitWhenCache() throws Exception { + BucketCache bucketCache = null; try { final Path dataTestDir = createAndGetTestDir(); String ioEngineName = "file:" + dataTestDir + "/bucketNoRecycler.cache"; String persistencePath = dataTestDir + "/bucketNoRecycler.persistence"; - BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, 1, 1, persistencePath); long usedByteSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedByteSize); @@ -864,6 +887,9 @@ public void testBlockAdditionWaitWhenCache() throws Exception { assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); } finally { + if (bucketCache != null) { + bucketCache.shutdown(); + } HBASE_TESTING_UTILITY.cleanupTestDir(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index dbd3d7f86646..bd69f28e1eac 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor; import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -85,7 +84,6 @@ public Configuration setupBucketCacheConfig(long bucketCachePersistInterval) thr } public BucketCache setupBucketCache(Configuration conf) throws IOException { - conf.set(CacheConfig.PREFETCH_PERSISTENCE_PATH_KEY, (testDir + "/prefetch.persistence")); BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", 60 * 1000, conf); @@ -111,9 +109,7 @@ public void testPrefetchPersistenceCrash() throws Exception { readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache); readStoreFile(storeFile2, 0, fs, cacheConf, conf, bucketCache); Thread.sleep(bucketCachePersistInterval); - assertTrue(new File(testDir + "/prefetch.persistence").exists()); assertTrue(new File(testDir + "/bucket.persistence").exists()); - assertTrue(new File(testDir + "/prefetch.persistence").delete()); assertTrue(new File(testDir + "/bucket.persistence").delete()); cleanupBucketCache(bucketCache); } @@ -128,7 +124,6 @@ public void testPrefetchPersistenceCrashNegative() throws Exception { // Load Cache Path storeFile = writeStoreFile("TestPrefetch2", conf, cacheConf, fs); readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache); - assertFalse(new File(testDir + "/prefetch.persistence").exists()); assertFalse(new File(testDir + "/bucket.persistence").exists()); cleanupBucketCache(bucketCache); } @@ -144,10 +139,10 @@ public void testPrefetchListUponBlockEviction() throws Exception { readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache1); Thread.sleep(500); // Evict Blocks from cache + assertTrue(bucketCache1.fullyCachedFiles.containsKey(storeFile.getName())); BlockCacheKey bucketCacheKey = bucketCache1.backingMap.entrySet().iterator().next().getKey(); - assertTrue(PrefetchExecutor.isFilePrefetched(storeFile.getName())); bucketCache1.evictBlock(bucketCacheKey); - assertFalse(PrefetchExecutor.isFilePrefetched(storeFile.getName())); + assertFalse(bucketCache1.fullyCachedFiles.containsKey(storeFile.getName())); } public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheConfig cacheConf, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java index 820e91aa6e81..b42e7be804db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java @@ -49,7 +49,7 @@ private static class MockBucketEntry extends BucketEntry { private long off; MockBucketEntry(long offset, int length, ByteBuffAllocator allocator) { - super(offset & 0xFF00, length, 0, false, (entry) -> { + super(offset & 0xFF00, length, length, 0, false, (entry) -> { return 
ByteBuffAllocator.NONE; }, allocator); this.off = offset; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java index 771ab0158f61..f15874bc61c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor; import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -106,8 +105,6 @@ public void setup() throws IOException { conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); testDir = TEST_UTIL.getDataTestDir(); TEST_UTIL.getTestFileSystem().mkdirs(testDir); - prefetchPersistencePath = testDir + "/prefetch.persistence"; - conf.set(CacheConfig.PREFETCH_PERSISTENCE_PATH_KEY, prefetchPersistencePath); fs = HFileSystem.get(conf); } @@ -132,10 +129,10 @@ public void testPrefetchPersistence() throws Exception { bucketCache.shutdown(); assertTrue(new File(testDir + "/bucket.persistence").exists()); - assertTrue(new File(testDir + "/prefetch.persistence").exists()); bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", 60 * 1000, conf); + cacheConf = new CacheConfig(conf, bucketCache); assertFalse(new File(testDir + "/bucket.persistence").exists()); assertFalse(new File(testDir + "/prefetch.persistence").exists()); assertTrue(usedSize != 0); @@ -148,9 +145,9 @@ public void testPrefetchPersistence() throws Exception { public void closeStoreFile(Path path) throws Exception { HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf); - assertTrue(PrefetchExecutor.isFilePrefetched(path.getName())); + assertTrue(bucketCache.fullyCachedFiles.containsKey(path.getName())); reader.close(true); - assertFalse(PrefetchExecutor.isFilePrefetched(path.getName())); + assertFalse(bucketCache.fullyCachedFiles.containsKey(path.getName())); } public void readStoreFile(Path storeFilePath, long offset) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java index 0e777a4a7b9f..58d9385f57e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java @@ -90,7 +90,7 @@ public void testAtomicRAMCache() throws Exception { MockHFileBlock blk = new MockHFileBlock(BlockType.DATA, size, size, -1, ByteBuffer.wrap(byteArr, 0, size), HFileBlock.FILL_HEADER, -1, 52, -1, new HFileContextBuilder().build(), ByteBuffAllocator.HEAP); - RAMQueueEntry re = new RAMQueueEntry(key, blk, 1, false); + RAMQueueEntry re = new RAMQueueEntry(key, blk, 1, false, false); Assert.assertNull(cache.putIfAbsent(key, re)); Assert.assertEquals(cache.putIfAbsent(key, re), re); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java index 3b2b9961b2b7..6fdea844aa32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java @@ -17,11 +17,15 @@ */ package org.apache.hadoop.hbase.io.hfile.bucket; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY; +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import java.io.BufferedWriter; +import java.io.File; import java.io.FileOutputStream; import java.io.OutputStreamWriter; import java.nio.file.FileSystems; @@ -32,12 +36,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheTestUtils; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -147,16 +154,15 @@ public void testRetrieveFromFile() throws Exception { @Test public void testRetrieveFromFileAfterDelete() throws Exception { - HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); Path testDir = TEST_UTIL.getDataTestDir(); TEST_UTIL.getTestFileSystem().mkdirs(testDir); Configuration conf = TEST_UTIL.getConfiguration(); conf.setLong(CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY, 300); - - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, - testDir + "/bucket.persistence", 60 * 1000, conf); + String mapFileName = testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime(); + BucketCache bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, mapFileName, 60 * 1000, conf); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); @@ -171,14 +177,13 @@ public void testRetrieveFromFileAfterDelete() throws Exception { // Shutdown BucketCache bucketCache.shutdown(); // Delete the persistence file - final java.nio.file.Path mapFile = - FileSystems.getDefault().getPath(testDir.toString(), "bucket.persistence"); - assertTrue(Files.deleteIfExists(mapFile)); + File mapFile = new File(mapFileName); + assertTrue(mapFile.delete()); Thread.sleep(350); // Create BucketCache - bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, - testDir + "/bucket.persistence", 60 * 1000, conf); + bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + 
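The TestVerifyBucketCacheFile changes below lean on two behaviours added earlier in this patch: persistToFile() checkpoints the backing map by writing a temporary file and renaming it over persistencePath, and a cache rebuilt from that checkpoint lazily drops any key whose on-disk cached-time trailer no longer matches. A hedged sketch of that checkpoint-and-restart cycle, with constructor arguments as used in these tests and someKey standing in for an arbitrary BlockCacheKey:

    // Checkpoint the current backing map; the rename keeps the previous checkpoint intact if the
    // process dies mid-write.
    bucketCache.persistToFile();

    // Simulate a crash/restart by building a new instance over the same cache and map files.
    BucketCache restored = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
      constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName,
      DEFAULT_ERROR_TOLERATION_DURATION, conf);

    // Stale keys are not necessarily purged up front: getBlock() catches the HBaseIOException
    // raised by the cached-time check and removes the offending entry from the backing map.
    Cacheable maybeStale = restored.getBlock(someKey, false, false, true);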
constructedBlockSizes, writeThreads, writerQLen, mapFileName, 60 * 1000, conf); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); } @@ -232,9 +237,15 @@ public void testModifiedBucketCacheFileData() throws Exception { /** * Test whether BucketCache is started normally after modifying the cache file's last modified * time. First Start BucketCache and add some blocks, then shutdown BucketCache and persist cache - * to file. Then Restart BucketCache after modify cache file's last modified time, and it can't - * restore cache from file, the cache file and persistence file would be deleted before - * BucketCache start normally. + * to file. Then Restart BucketCache after modify cache file's last modified time. HBASE-XXXX has + * modified persistence cache such that now we store extra 8 bytes at the end of each block in the + * cache, representing the nanosecond time the block has been cached. So in the event the cache + * file has failed checksum verification during loading time, we go through all the cached blocks + * in the cache map and validate the cached time long between what is in the map and the cache + * file. If that check fails, we pull the cache key entry out of the map. Since in this test we + * are only modifying the access time to induce a checksum error, the cache file content is still + * valid and the extra verification should validate that all cache keys in the map are still + * recoverable from the cache. * @throws Exception the exception */ @Test @@ -249,6 +260,8 @@ public void testModifiedBucketCacheFileTime() throws Exception { long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); + Pair myPair = new Pair<>(); + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); // Add blocks @@ -257,6 +270,8 @@ public void testModifiedBucketCacheFileTime() throws Exception { } usedSize = bucketCache.getAllocator().getUsedSize(); assertNotEquals(0, usedSize); + long blockCount = bucketCache.backingMap.size(); + assertNotEquals(0, blockCount); // persist cache to file bucketCache.shutdown(); @@ -268,9 +283,64 @@ public void testModifiedBucketCacheFileTime() throws Exception { bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); - assertEquals(0, bucketCache.getAllocator().getUsedSize()); - assertEquals(0, bucketCache.backingMap.size()); + assertEquals(usedSize, bucketCache.getAllocator().getUsedSize()); + assertEquals(blockCount, bucketCache.backingMap.size()); + + TEST_UTIL.cleanupTestDir(); + } + + /** + * When using persistent bucket cache, there may be crashes between persisting the backing map and + * syncing new blocks to the cache file itself, leading to an inconsistent state between the cache + * keys and the cached data. This is to make sure the cache keys are updated accordingly, and the + * keys that are still valid do succeed in retrieve related block data from the cache without any + * corruption. 
+ * @throws Exception the exception + */ + @Test + public void testBucketCacheRecovery() throws Exception { + HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + Path testDir = TEST_UTIL.getDataTestDir(); + TEST_UTIL.getTestFileSystem().mkdirs(testDir); + Configuration conf = HBaseConfiguration.create(); + // Disables the persister thread by setting its interval to MAX_VALUE + conf.setLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, Long.MAX_VALUE); + String mapFileName = testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime(); + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName, + DEFAULT_ERROR_TOLERATION_DURATION, conf); + + CacheTestUtils.HFileBlockPair[] blocks = + CacheTestUtils.generateHFileBlocks(constructedBlockSize, 4); + // Add three blocks + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[0].getBlockName(), blocks[0].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[1].getBlockName(), blocks[1].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[2].getBlockName(), blocks[2].getBlock()); + // saves the current state + bucketCache.persistToFile(); + // evicts first block + bucketCache.evictBlock(blocks[0].getBlockName()); + + // now adds a fourth block to bucket cache + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[3].getBlockName(), blocks[3].getBlock()); + // Creates new bucket cache instance without persisting to file after evicting first block + // and caching fourth block. So the bucket cache file has only the last three blocks, + // but backing map (containing cache keys) was persisted when first three blocks + // were in the cache. So the state on this recovery is: + // - Backing map: [block0, block1, block2] + // - Cache: [block1, block2, block3] + // Therefore, this bucket cache would be able to recover only block1 and block2. 
+ BucketCache newBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName, + DEFAULT_ERROR_TOLERATION_DURATION, conf); + assertNull(newBucketCache.getBlock(blocks[0].getBlockName(), false, false, false)); + assertEquals(blocks[1].getBlock(), + newBucketCache.getBlock(blocks[1].getBlockName(), false, false, false)); + assertEquals(blocks[2].getBlock(), + newBucketCache.getBlock(blocks[2].getBlockName(), false, false, false)); + assertNull(newBucketCache.getBlock(blocks[3].getBlockName(), false, false, false)); + assertEquals(2, newBucketCache.backingMap.size()); TEST_UTIL.cleanupTestDir(); } From 89ca7f4ade84c84a246281c71898543b6161c099 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 23 Aug 2023 14:10:00 -0400 Subject: [PATCH 059/514] HBASE-28010 Connection attributes can become corrupted on the server side (#5366) Signed-off-by: Bryan Beaudreault --- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 16 +++++++- .../apache/hadoop/hbase/ipc/ServerCall.java | 27 +++++++++++-- .../hadoop/hbase/ipc/ServerRpcConnection.java | 18 +++++++++ .../TestRequestAndConnectionAttributes.java | 38 +++++++++++-------- .../namequeues/TestNamedQueueRecorder.java | 8 +++- .../region/TestRegionProcedureStore.java | 8 +++- 6 files changed, 92 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index cc97a39c7ee4..0555202f88b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -27,7 +29,6 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** @@ -83,7 +84,18 @@ public interface RpcCall extends RpcCallContext { /** Returns The request header of this call. */ RequestHeader getHeader(); - ConnectionHeader getConnectionHeader(); + /** + * Returns the map of attributes specified when building the Connection. + * @see org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration, + * ExecutorService, User, Map) + */ + Map getConnectionAttributes(); + + /** + * Returns the map of attributes specified when building the request. 
+ * @see org.apache.hadoop.hbase.client.TableBuilder#setRequestAttribute(String, byte[]) + */ + Map getRequestAttributes(); /** Returns Port of remote address in this call */ int getRemotePort(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index f3568a36f144..66a2e44fac19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -24,7 +24,9 @@ import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.CellScanner; @@ -42,14 +44,15 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; @@ -99,6 +102,7 @@ public abstract class ServerCall implements RpcCa // cumulative size of serialized exceptions private long exceptionSize = 0; private final boolean retryImmediatelySupported; + private volatile Map requestAttributes; // This is a dirty hack to address HBASE-22539. The highest bit is for rpc ref and cleanup, and // the rest of the bits are for WAL reference count. 
We can only call release if all of them are @@ -209,8 +213,25 @@ public RequestHeader getHeader() { } @Override - public RPCProtos.ConnectionHeader getConnectionHeader() { - return this.connection.connectionHeader; + public Map getConnectionAttributes() { + return this.connection.connectionAttributes; + } + + @Override + public Map getRequestAttributes() { + if (this.requestAttributes == null) { + if (header.getAttributeList().isEmpty()) { + this.requestAttributes = Collections.emptyMap(); + } else { + Map requestAttributes = + Maps.newHashMapWithExpectedSize(header.getAttributeList().size()); + for (HBaseProtos.NameBytesPair nameBytesPair : header.getAttributeList()) { + requestAttributes.put(nameBytesPair.getName(), nameBytesPair.getValue().toByteArray()); + } + this.requestAttributes = requestAttributes; + } + } + return this.requestAttributes; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index b09f33c47f9a..e0f69e4b84c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -31,6 +31,8 @@ import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; +import java.util.Collections; +import java.util.Map; import java.util.Objects; import java.util.Properties; import org.apache.commons.crypto.cipher.CryptoCipherFactory; @@ -65,6 +67,7 @@ import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.ByteInput; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; @@ -75,6 +78,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; @@ -103,6 +107,7 @@ abstract class ServerRpcConnection implements Closeable { protected int remotePort; protected InetAddress addr; protected ConnectionHeader connectionHeader; + protected Map connectionAttributes; /** * Codec the client asked use. 
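[Editorial aside, not part of the patch] Server-side consumers are expected to read the defensively copied maps through the new RpcCall accessors rather than the raw protobuf ConnectionHeader; the test coprocessor later in this patch does exactly that. A minimal sketch, using fully qualified java.util types to keep it self-contained and a hypothetical attribute name:

    // Sketch: reading the copied attribute maps for the RPC currently being served,
    // e.g. from inside a coprocessor hook. RpcServer and RpcCall live in org.apache.hadoop.hbase.ipc.
    java.util.Optional<RpcCall> call = RpcServer.getCurrentCall();
    if (call.isPresent()) {
      java.util.Map<String, byte[]> connAttrs = call.get().getConnectionAttributes();
      java.util.Map<String, byte[]> reqAttrs = call.get().getRequestAttributes();
      byte[] traceId = reqAttrs.get("trace-id"); // "trace-id" is a hypothetical attribute name
    }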
@@ -405,6 +410,19 @@ private CodedInputStream createCis(ByteBuff buf) { // Reads the connection header following version private void processConnectionHeader(ByteBuff buf) throws IOException { this.connectionHeader = ConnectionHeader.parseFrom(createCis(buf)); + + // we want to copy the attributes prior to releasing the buffer so that they don't get corrupted + // eventually + if (connectionHeader.getAttributeList().isEmpty()) { + this.connectionAttributes = Collections.emptyMap(); + } else { + this.connectionAttributes = + Maps.newHashMapWithExpectedSize(connectionHeader.getAttributeList().size()); + for (HBaseProtos.NameBytesPair nameBytesPair : connectionHeader.getAttributeList()) { + this.connectionAttributes.put(nameBytesPair.getName(), + nameBytesPair.getValue().toByteArray()); + } + } String serviceName = connectionHeader.getServiceName(); if (serviceName == null) { throw new EmptyServiceNameException(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java index b376bfc18557..728b877a32b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Random; import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -59,8 +60,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; - @Category({ ClientTests.class, MediumTests.class }) public class TestRequestAndConnectionAttributes { @@ -101,15 +100,22 @@ public void setup() { } @Test - public void testConnectionAttributes() throws IOException { + public void testConnectionHeaderOverwrittenAttributesRemain() throws IOException { TableName tableName = TableName.valueOf("testConnectionAttributes"); - TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("0") }, 1, - HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName()); + byte[] cf = Bytes.toBytes("0"); + TEST_UTIL.createTable(tableName, new byte[][] { cf }, 1, HConstants.DEFAULT_BLOCKSIZE, + AttributesCoprocessor.class.getName()); Configuration conf = TEST_UTIL.getConfiguration(); try (Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES); Table table = conn.getTable(tableName)) { - Result result = table.get(new Get(Bytes.toBytes(0))); + + // submit a 300 byte rowkey here to encourage netty's allocator to overwrite the connection + // header + byte[] bytes = new byte[300]; + new Random().nextBytes(bytes); + Result result = table.get(new Get(bytes)); + assertEquals(CONNECTION_ATTRIBUTES.size(), result.size()); for (Map.Entry attr : CONNECTION_ATTRIBUTES.entrySet()) { byte[] val = result.getValue(Bytes.toBytes("c"), Bytes.toBytes(attr.getKey())); @@ -270,15 +276,15 @@ public void preGetOp(ObserverContext c, Get get, // for connection attrs test RpcCall rpcCall = RpcServer.getCurrentCall().get(); - for (HBaseProtos.NameBytesPair attr : rpcCall.getHeader().getAttributeList()) { + for (Map.Entry attr : rpcCall.getRequestAttributes().entrySet()) { result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) - 
.setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getName())) - .setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build()); + .setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getKey())) + .setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build()); } - for (HBaseProtos.NameBytesPair attr : rpcCall.getConnectionHeader().getAttributeList()) { + for (Map.Entry attr : rpcCall.getConnectionAttributes().entrySet()) { result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) - .setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getName())) - .setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build()); + .setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getKey())) + .setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build()); } result.sort(CellComparator.getInstance()); c.bypass(); @@ -299,15 +305,15 @@ public void prePut(ObserverContext c, Put put, WAL private void validateRequestAttributes() { RpcCall rpcCall = RpcServer.getCurrentCall().get(); - List attrs = rpcCall.getHeader().getAttributeList(); + Map attrs = rpcCall.getRequestAttributes(); if (attrs.size() != REQUEST_ATTRIBUTES.size()) { return; } - for (HBaseProtos.NameBytesPair attr : attrs) { - if (!REQUEST_ATTRIBUTES.containsKey(attr.getName())) { + for (Map.Entry attr : attrs.entrySet()) { + if (!REQUEST_ATTRIBUTES.containsKey(attr.getKey())) { return; } - if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getName()), attr.getValue().toByteArray())) { + if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getKey()), attr.getValue())) { return; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 7a3ca0b7cf9f..c24b364a2277 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -24,6 +24,7 @@ import java.security.PrivilegedExceptionAction; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -695,7 +696,12 @@ public RPCProtos.RequestHeader getHeader() { } @Override - public RPCProtos.ConnectionHeader getConnectionHeader() { + public Map getConnectionAttributes() { + return null; + } + + @Override + public Map getRequestAttributes() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index dd49d00ac3a1..83f788ba1518 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.HashSet; +import java.util.Map; import java.util.Optional; import java.util.Set; import org.apache.hadoop.hbase.CellScanner; @@ -222,7 +223,12 @@ public RPCProtos.RequestHeader getHeader() { } @Override - public RPCProtos.ConnectionHeader getConnectionHeader() { + public Map getConnectionAttributes() { + return null; + } + + @Override + public Map 
getRequestAttributes() { return null; } From dcc5495a7693eee9e2cca3da4927410ae55036d8 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 27 Aug 2023 14:53:48 +0800 Subject: [PATCH 060/514] HBASE-28028 Read all compressed bytes to a byte array before submitting them to decompressor (#5357) Signed-off-by: Xin Sun --- .../regionserver/wal/CompressionContext.java | 9 +-- ...mpressionBoundedDelegatingInputStream.java | 73 +++++++++---------- 2 files changed, 36 insertions(+), 46 deletions(-) rename hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedDelegatingInputStream.java => hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java (57%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java index 73cf4821db00..633decab0a82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java @@ -28,7 +28,6 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.io.BoundedDelegatingInputStream; import org.apache.hadoop.hbase.io.TagCompressionContext; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.util.Dictionary; @@ -77,7 +76,7 @@ static class ValueCompressor { private final Compression.Algorithm algorithm; private Compressor compressor; private Decompressor decompressor; - private BoundedDelegatingInputStream lowerIn; + private WALDecompressionBoundedDelegatingInputStream lowerIn; private ByteArrayOutputStream lowerOut; private InputStream compressedIn; private OutputStream compressedOut; @@ -108,20 +107,17 @@ public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) thro public void decompress(InputStream in, int inLength, byte[] outArray, int outOffset, int outLength) throws IOException { - // Our input is a sequence of bounded byte ranges (call them segments), with // BoundedDelegatingInputStream providing a way to switch in a new segment when the // previous segment has been fully consumed. // Create the input streams here the first time around. if (compressedIn == null) { - lowerIn = new BoundedDelegatingInputStream(in, inLength); + lowerIn = new WALDecompressionBoundedDelegatingInputStream(); if (decompressor == null) { decompressor = algorithm.getDecompressor(); } compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, IO_BUFFER_SIZE); - } else { - lowerIn.setDelegate(in, inLength); } if (outLength == 0) { // The BufferedInputStream will return earlier and skip reading anything if outLength == 0, @@ -131,6 +127,7 @@ public void decompress(InputStream in, int inLength, byte[] outArray, int outOff // such as data loss when splitting wal or replicating wal. 
IOUtils.skipFully(in, inLength); } else { + lowerIn.reset(in, inLength); IOUtils.readFully(compressedIn, outArray, outOffset, outLength); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedDelegatingInputStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java similarity index 57% rename from hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedDelegatingInputStream.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java index 2a6db09050c6..0f4fd78a0b83 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedDelegatingInputStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java @@ -15,75 +15,73 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.io; +package org.apache.hadoop.hbase.regionserver.wal; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import org.apache.commons.io.IOUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is a stream that will only supply bytes from its delegate up to a certain limit. When there - * is an attempt to set the position beyond that it will signal that the input is finished. + * This class is only used by WAL ValueCompressor for decompression. + *
+ * WARNING: The implementation is very tricky and does not follow typical + * InputStream pattern, so do not use it in any other places. */ @InterfaceAudience.Private -public class BoundedDelegatingInputStream extends DelegatingInputStream { +class WALDecompressionBoundedDelegatingInputStream extends InputStream { - protected long limit; - protected long pos; + private static final Logger LOG = + LoggerFactory.getLogger(WALDecompressionBoundedDelegatingInputStream.class); - public BoundedDelegatingInputStream(InputStream in, long limit) { - super(in); - this.limit = limit; - this.pos = 0; - } + private InputStream in; + + private long pos; - public void setDelegate(InputStream in, long limit) { + private long limit; + + public void reset(InputStream in, long limit) { this.in = in; this.limit = limit; this.pos = 0; } - /** - * Call the delegate's {@code read()} method if the current position is less than the limit. - * @return the byte read or -1 if the end of stream or the limit has been reached. - */ @Override public int read() throws IOException { if (pos >= limit) { return -1; } int result = in.read(); + if (result < 0) { + return -1; + } pos++; return result; } - /** - * Call the delegate's {@code read(byte[], int, int)} method if the current position is less than - * the limit. - * @param b read buffer - * @param off Start offset - * @param len The number of bytes to read - * @return the number of bytes read or -1 if the end of stream or the limit has been reached. - */ @Override - public int read(final byte[] b, final int off, final int len) throws IOException { + public int read(byte[] b, int off, int len) throws IOException { if (pos >= limit) { return -1; } - long readLen = Math.min(len, limit - pos); - int read = in.read(b, off, (int) readLen); - if (read < 0) { + int readLen = (int) Math.min(len, limit - pos); + try { + IOUtils.readFully(in, b, off, readLen); + } catch (EOFException e) { + // This is trick here, we will always try to read enough bytes to fill the buffer passed in, + // or we reach the end of this compression block, if there are not enough bytes, we just + // return -1 to let the upper layer fail with EOF + // In WAL value decompression this is OK as if we can not read all the data, we will finally + // get an EOF somewhere + LOG.debug("Got EOF while we want to read {} bytes from stream", readLen, e); return -1; } - pos += read; - return read; + return readLen; } - /** - * Call the delegate's {@code skip(long)} method. - * @param len the number of bytes to skip - * @return the actual number of bytes skipped - */ @Override public long skip(final long len) throws IOException { long skipped = in.skip(Math.min(len, limit - pos)); @@ -91,10 +89,6 @@ public long skip(final long len) throws IOException { return skipped; } - /** - * @return the remaining bytes within the bound if the current position is less than the limit, or - * 0 otherwise. - */ @Override public int available() throws IOException { if (pos >= limit) { @@ -108,5 +102,4 @@ public int available() throws IOException { // successful decompression depends on this behavior. 
return (int) (limit - pos); } - } From 5527dd94537250bbe76dd7b074063c32f7ec29db Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sun, 27 Aug 2023 19:27:05 -0700 Subject: [PATCH 061/514] HBASE-28042 Snapshot corruptions due to non-atomic rename within same filesystem (#5369) Co-authored-by: Ujjawal Signed-off-by: Wellington Chevreuil Signed-off-by: Abhey Rana Signed-off-by: Ujjawal Signed-off-by: Aman Poonia --- .../snapshot/SnapshotDescriptionUtils.java | 37 ++++++++++++-- .../TestSnapshotScannerHDFSAclController.java | 13 +++-- .../TestSnapshotDescriptionUtils.java | 49 +++++++++++++++++++ 3 files changed, 88 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 1e2bbb68c41d..689cd89259ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -419,11 +419,9 @@ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSyste // if this fails URI workingURI = workingDirFs.getUri(); URI rootURI = fs.getUri(); + if ( - (!workingURI.getScheme().equals(rootURI.getScheme()) || workingURI.getAuthority() == null - || !workingURI.getAuthority().equals(rootURI.getAuthority()) - || workingURI.getUserInfo() == null - || !workingURI.getUserInfo().equals(rootURI.getUserInfo()) + (shouldSkipRenameSnapshotDirectories(workingURI, rootURI) || !fs.rename(workingDir, snapshotDir)) && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf) ) { @@ -432,6 +430,37 @@ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSyste } } + static boolean shouldSkipRenameSnapshotDirectories(URI workingURI, URI rootURI) { + // check scheme, e.g. file, hdfs + if (workingURI.getScheme() == null && rootURI.getScheme() != null) { + return true; + } + if (workingURI.getScheme() != null && !workingURI.getScheme().equals(rootURI.getScheme())) { + return true; + } + + // check Authority, e.g. 
localhost:port + if (workingURI.getAuthority() == null && rootURI.getAuthority() != null) { + return true; + } + if ( + workingURI.getAuthority() != null && !workingURI.getAuthority().equals(rootURI.getAuthority()) + ) { + return true; + } + + // check UGI/userInfo + if (workingURI.getUserInfo() == null && rootURI.getUserInfo() != null) { + return true; + } + if ( + workingURI.getUserInfo() != null && !workingURI.getUserInfo().equals(rootURI.getUserInfo()) + ) { + return true; + } + return false; + } + /** * Check if the user is this table snapshot's owner * @param snapshot the table snapshot description diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java index d79e3f308104..99d5a89ac2c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java @@ -158,6 +158,7 @@ public void testGrantGlobal1() throws Exception { TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table); snapshotAndWait(snapshot1, table); + snapshotAndWait(snapshot2, table); // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); @@ -174,8 +175,6 @@ public void testGrantGlobal1() throws Exception { // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); - // take a snapshot and ACLs are inherited automatically - snapshotAndWait(snapshot2, table); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); deleteTable(table); @@ -197,10 +196,10 @@ public void testGrantGlobal2() throws Exception { // create table in namespace1 and snapshot TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); snapshotAndWait(snapshot1, table1); - admin.grant(new UserPermission(grantUserName, - Permission.newBuilder(namespace1).withActions(READ).build()), false); // grant G(W) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); + admin.grant(new UserPermission(grantUserName, + Permission.newBuilder(namespace1).withActions(READ).build()), false); // create table in namespace2 and snapshot TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); snapshotAndWait(snapshot2, table2); @@ -231,11 +230,11 @@ public void testGrantGlobal3() throws Exception { // grant table1(R) TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); snapshotAndWait(snapshot1, table1); - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); - // grant G(W) - SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); snapshotAndWait(snapshot2, table2); + // grant G(W) + SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); // check scan snapshot TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java index 
2093baf36075..8e62ef16bbfe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java @@ -22,6 +22,7 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.net.URI; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -191,4 +192,52 @@ public void testIsWithinWorkingDir() throws IOException { assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( new Path("file:" + hbsaeDir + "/.hbase-snapshot/.tmp/snapshot"), conf)); } + + @Test + public void testShouldSkipRenameSnapshotDirectories() { + URI workingDirURI = URI.create("/User/test1"); + URI rootDirURI = URI.create("hdfs:///User/test2"); + + // should skip rename if it's not the same scheme; + assertTrue( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + workingDirURI = URI.create("/User/test1"); + rootDirURI = URI.create("file:///User/test2"); + assertTrue( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + // skip rename when either scheme or authority are the not same + workingDirURI = URI.create("hdfs://localhost:8020/User/test1"); + rootDirURI = URI.create("hdfs://otherhost:8020/User/test2"); + assertTrue( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + workingDirURI = URI.create("file:///User/test1"); + rootDirURI = URI.create("hdfs://localhost:8020/User/test2"); + assertTrue( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + workingDirURI = URI.create("hdfs:///User/test1"); + rootDirURI = URI.create("hdfs:///User/test2"); + assertFalse( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + workingDirURI = URI.create("hdfs://localhost:8020/User/test1"); + rootDirURI = URI.create("hdfs://localhost:8020/User/test2"); + assertFalse( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + workingDirURI = URI.create("hdfs://user:password@localhost:8020/User/test1"); + rootDirURI = URI.create("hdfs://user:password@localhost:8020/User/test2"); + assertFalse( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + + // skip rename when user information is not the same + workingDirURI = URI.create("hdfs://user:password@localhost:8020/User/test1"); + rootDirURI = URI.create("hdfs://user2:password2@localhost:8020/User/test2"); + assertTrue( + SnapshotDescriptionUtils.shouldSkipRenameSnapshotDirectories(workingDirURI, rootDirURI)); + } + } From fd33c14e1ccb151becf13cd406c159f65594eb19 Mon Sep 17 00:00:00 2001 From: guluo Date: Thu, 31 Aug 2023 21:33:17 +0800 Subject: [PATCH 062/514] HBASE-28051 The javadoc about RegionProcedureStore.delete is incorrect (#5377) Signed-off-by: Duo Zhang --- .../hbase/procedure2/store/region/RegionProcedureStore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java index 93fbe8c95fef..9cc42bf587ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java @@ -71,7 +71,7 @@ * A procedure store which uses the master local store to store all the procedures. *
* We use proc:d column to store the serialized protobuf format procedure, and when deleting we will - * first fill the info:proc column with an empty byte array, and then actually delete them in the + * first fill the proc:d column with an empty byte array, and then actually delete them in the * {@link #cleanup()} method. This is because that we need to retain the max procedure id, so we can * not directly delete a procedure row as we do not know if it is the one with the max procedure id. */ From 121c8e17ecea66267fe77bab078f79d14a74a832 Mon Sep 17 00:00:00 2001 From: guluo Date: Thu, 31 Aug 2023 21:33:45 +0800 Subject: [PATCH 063/514] HBASE-28052 Removing the useless parameters from ProcedureExecutor.loadProcedures (#5378) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/procedure2/ProcedureExecutor.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index f6263a5530bb..696dbb71b9f9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -332,7 +332,7 @@ public void setMaxProcId(long maxProcId) { @Override public void load(ProcedureIterator procIter) throws IOException { - loadProcedures(procIter, abortOnCorruption); + loadProcedures(procIter); } @Override @@ -394,8 +394,7 @@ private void restoreLocks() { }); } - private void loadProcedures(ProcedureIterator procIter, boolean abortOnCorruption) - throws IOException { + private void loadProcedures(ProcedureIterator procIter) throws IOException { // 1. Build the rollback stack int runnableCount = 0; int failedCount = 0; From 198385aa7b10cc09b19ab6c12948340abbe6ef25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: Tue, 5 Sep 2023 11:03:50 +0200 Subject: [PATCH 064/514] HBASE-28038 Add TLS settings to ZooKeeper client (#5370) Signed-off-by: Wellington Chevreuil Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi Reviewed-by: Istvan Toth --- .../hadoop/hbase/zookeeper/ZKConfig.java | 34 +++++++++++++++ .../hadoop/hbase/zookeeper/TestZKConfig.java | 43 +++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 32cfde410d56..12d81fee6586 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map.Entry; import java.util.Properties; +import java.util.Set; import org.apache.commons.validator.routines.InetAddressValidator; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -28,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; /** * Utility methods for reading, and building the ZooKeeper configuration. 
The order and priority for @@ -38,6 +40,13 @@ public final class ZKConfig { private static final String VARIABLE_START = "${"; + private static final String ZOOKEEPER_JAVA_PROPERTY_PREFIX = "zookeeper."; + + /** Supported ZooKeeper client TLS properties */ + static final Set ZOOKEEPER_CLIENT_TLS_PROPERTIES = + ImmutableSet.of("client.secure", "clientCnxnSocket", "ssl.keyStore.location", + "ssl.keyStore.password", "ssl.keyStore.passwordPath", "ssl.trustStore.location", + "ssl.trustStore.password", "ssl.trustStore.passwordPath"); private ZKConfig() { } @@ -123,6 +132,7 @@ private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf * @return Quorum servers */ public static String getZKQuorumServersString(Configuration conf) { + setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf); return getZKQuorumServersStringFromHbaseConfig(conf); } @@ -318,6 +328,7 @@ public String getZnodeParent() { * @return Client quorum servers, or null if not specified */ public static String getClientZKQuorumServersString(Configuration conf) { + setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf); String clientQuromServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); if (clientQuromServers == null) { return null; @@ -330,4 +341,27 @@ public static String getClientZKQuorumServersString(Configuration conf) { final String[] serverHosts = StringUtils.getStrings(clientQuromServers); return buildZKQuorumServerString(serverHosts, clientZkClientPort); } + + private static void setZooKeeperClientSystemProperties(String prefix, Configuration conf) { + synchronized (conf) { + for (Entry entry : conf) { + String key = entry.getKey(); + if (!key.startsWith(prefix)) { + continue; + } + String zkKey = key.substring(prefix.length()); + if (!ZOOKEEPER_CLIENT_TLS_PROPERTIES.contains(zkKey)) { + continue; + } + String value = entry.getValue(); + // If the value has variables substitutions, need to do a get. + if (value.contains(VARIABLE_START)) { + value = conf.get(key); + } + if (System.getProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + zkKey) == null) { + System.setProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + zkKey, value); + } + } + } + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java index 381f78f055e9..7418afe5d222 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.zookeeper; +import static org.apache.hadoop.hbase.zookeeper.ZKConfig.ZOOKEEPER_CLIENT_TLS_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -90,6 +91,48 @@ public void testClusterKeyWithMultiplePorts() throws Exception { testKey("server1:2182,server2:2183,server1", 2181, "/hbase", true); } + @Test + public void testZooKeeperTlsPropertiesClient() { + // Arrange + Configuration conf = HBaseConfiguration.create(); + for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { + conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + p, p); + String zkprop = "zookeeper." + p; + System.clearProperty(zkprop); + } + + // Act + ZKConfig.getClientZKQuorumServersString(conf); + + // Assert + for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { + String zkprop = "zookeeper." 
+ p; + assertEquals("Invalid or unset system property: " + zkprop, p, System.getProperty(zkprop)); + System.clearProperty(zkprop); + } + } + + @Test + public void testZooKeeperTlsPropertiesServer() { + // Arrange + Configuration conf = HBaseConfiguration.create(); + for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { + conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + p, p); + String zkprop = "zookeeper." + p; + System.clearProperty(zkprop); + } + + // Act + ZKConfig.getZKQuorumServersString(conf); + + // Assert + for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { + String zkprop = "zookeeper." + p; + assertEquals("Invalid or unset system property: " + zkprop, p, System.getProperty(zkprop)); + System.clearProperty(zkprop); + } + } + private void testKey(String ensemble, int port, String znode) throws IOException { testKey(ensemble, port, znode, false); // not support multiple client ports } From 33e7e5315b4244196e1652069bf7f8662103969c Mon Sep 17 00:00:00 2001 From: Sergey Soldatov Date: Tue, 5 Sep 2023 16:59:28 -0700 Subject: [PATCH 065/514] HBASE-28055 Performance improvement for scan over several stores. (#5379) Signed-off-by: Ankit Singhal Signed-off-by: Tak Lon (Stephen) Wu --- .../org/apache/hadoop/hbase/regionserver/StoreScanner.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 2b6ac583ff66..c12307841a2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -935,7 +935,9 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { // We need this check because it may happen that the new scanner that we get // during heap.next() is requiring reseek due of fake KV previously generated for // ROWCOL bloom filter optimization. See HBASE-19863 for more details - if (useRowColBloom && nextCell != null && matcher.compareKeyForNextColumn(nextCell, cell) < 0) { + if ( + useRowColBloom && nextCell != null && cell.getTimestamp() == PrivateConstants.OLDEST_TIMESTAMP + ) { return false; } return true; From 97d512be7c9802fbba7890a7ff34a0e882cd8a84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: Wed, 6 Sep 2023 12:46:42 +0200 Subject: [PATCH 066/514] HBASE-28063 Document how to configure TLS settings to ZooKeeper client (#5383) Signed-off-by: nihaljain@apache.org --- src/main/asciidoc/_chapters/zookeeper.adoc | 67 ++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/src/main/asciidoc/_chapters/zookeeper.adoc b/src/main/asciidoc/_chapters/zookeeper.adoc index 98fc4980ef38..4aaf049c88c1 100644 --- a/src/main/asciidoc/_chapters/zookeeper.adoc +++ b/src/main/asciidoc/_chapters/zookeeper.adoc @@ -441,7 +441,74 @@ This would avoid the need for a separate Hadoop jar that fixes link:https://issu ==== Elimination of `kerberos.removeHostFromPrincipal` and`kerberos.removeRealmFromPrincipal` +== TLS connection to ZooKeeper +Apache ZooKeeper also supports SSL/TLS client connections to encrypt the data in transmission. This is particularly +useful when the ZooKeeper ensemble is running on a host different from HBase and data has to be sent +over the wire. 
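[Editorial aside, not part of this documentation patch] The sections that follow list the supported settings. As a minimal Java sketch of the mechanism added by HBASE-28038 earlier in this series: configuration keys carrying the hbase.zookeeper.property. prefix are promoted to the corresponding zookeeper.* JVM system properties when ZKConfig builds the quorum string, which is how the ZooKeeper client picks them up. Property names come from the listings below; the values and paths are placeholders.

    // Sketch mirroring TestZKConfig from the HBASE-28038 patch above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.property.client.secure", "true");
    conf.set("hbase.zookeeper.property.clientCnxnSocket",
      "org.apache.zookeeper.ClientCnxnSocketNetty");
    conf.set("hbase.zookeeper.property.ssl.trustStore.location", "/path/to/your/truststore");
    ZKConfig.getZKQuorumServersString(conf);
    // After this call, System.getProperty("zookeeper.client.secure") returns "true" for this JVM.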
+ +=== Java system properties + +The ZooKeeper client supports the following Java system properties to set up TLS connection: + +[source,bourne] +---- +zookeeper.client.secure=true +zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty +zookeeper.ssl.keyStore.location="/path/to/your/keystore" +zookeeper.ssl.keyStore.password="keystore_password" +zookeeper.ssl.trustStore.location="/path/to/your/truststore" +zookeeper.ssl.trustStore.password="truststore_password" +---- + +Setting up KeyStore is optional and only required if ZooKeeper server requests for client certificate. + +Find more detailed information in the link:https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeper+SSL+User+Guide[ZooKeeper SSL User Guide]. + +[WARNING] +These're standard Java properties which should be set in the HBase command line and are effective in +the entire Java process. All ZooKeeper clients running in the same process will pick them up including +co-processors. + +[NOTE] +Since ZooKeeper version 3.8 the following two properties are useful to store the +keystore and truststore passwords in protected text files rather than exposing them in the command line. + +[source,bourne] +---- +zookeeper.ssl.keyStore.passwordPath=/path/to/secure/file +zookeeper.ssl.trustStore.passwordPath=/path/to/secure/file +---- + +=== HBase configuration + +By adding link:https://issues.apache.org/jira/browse/HBASE-28038[HBASE-28038], ZooKeeper client TLS +settings are also available in _hbase-site.xml_ via `hbase.zookeeper.property` prefix. In contrast +to Java system properties this could be more convenient under some circumstances. + +[source,xml] +---- + + + + hbase.zookeeper.property.client.secure + true + + + hbase.zookeeper.property.clientCnxnSocket + org.apache.zookeeper.ClientCnxnSocketNetty + + + hbase.zookeeper.property.ssl.trustStore.location + /path/to/your/truststore + +... + +---- + +[NOTE] +These settings are eventually transformed into Java system properties, it's just a convenience feature. +So, the same rules that mentioned in the previous point, applies to them as well. ifdef::backend-docbook[] [index] From 7209d10c89642d3a71464a48f8321ebda218eeb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:21:22 +0800 Subject: [PATCH 067/514] HBASE-28072 Bump gitpython from 3.1.32 to 3.1.34 in /dev-support/flaky-tests (#5385) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.32 to 3.1.34. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.32...3.1.34) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index cff46e1a75cf..5326cc750bba 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -17,6 +17,6 @@ # requests==2.31.0 future==0.18.3 -gitpython==3.1.32 +gitpython==3.1.34 rbtools==4.0 jinja2==3.1.2 From 0a93d902629b3c1270e2152132cc7856cecad49f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:44:42 +0800 Subject: [PATCH 068/514] HBASE-28074 Bump gitpython from 3.1.34 to 3.1.35 in /dev-support/flaky-tests (#5395) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.34 to 3.1.35. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.34...3.1.35) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 5326cc750bba..106ead4aa4aa 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -17,6 +17,6 @@ # requests==2.31.0 future==0.18.3 -gitpython==3.1.34 +gitpython==3.1.35 rbtools==4.0 jinja2==3.1.2 From afb3c0dca0192c82de767b8709d756d11458d843 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:45:11 +0800 Subject: [PATCH 069/514] HBASE-28073 Bump gitpython in /dev-support/git-jira-release-audit (#5396) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.32 to 3.1.35. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.32...3.1.35) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 9d30767cb3c1..47e6a96aa77e 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -23,7 +23,7 @@ cryptography==41.0.3 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 -GitPython==3.1.32 +GitPython==3.1.35 idna==2.8 jira==2.0.0 oauthlib==3.1.0 From 75bcd3c49b093e3bb580377da8ec7858797fd437 Mon Sep 17 00:00:00 2001 From: Nikita Pande <37657012+nikita15p@users.noreply.github.com> Date: Tue, 12 Sep 2023 15:10:42 +0530 Subject: [PATCH 070/514] HBASE-27991 [hbase-examples] MultiThreadedClientExample throws java.lang.ClassCastException (#5346) Signed-off-by: Nihal Jain Signed-off-by: Nick Dimiduk --- .../example/MultiThreadedClientExample.java | 6 +- .../TestMultiThreadedClientExample.java | 73 +++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestMultiThreadedClientExample.java diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java index ca3470d7b6b1..da81351bcf6c 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java @@ -23,10 +23,11 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.ForkJoinPool; import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.Cell; @@ -129,7 +130,8 @@ public int run(String[] args) throws Exception { // // We don't want to mix hbase and business logic. // - ExecutorService service = new ForkJoinPool(threads * 2); + ThreadPoolExecutor service = new ThreadPoolExecutor(threads * 2, threads * 2, 60L, + TimeUnit.SECONDS, new LinkedBlockingQueue<>()); // Create two different connections showing how it's possible to // separate different types of requests onto different connections diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestMultiThreadedClientExample.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestMultiThreadedClientExample.java new file mode 100644 index 000000000000..0db1b229361e --- /dev/null +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestMultiThreadedClientExample.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.example; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, MediumTests.class }) +public class TestMultiThreadedClientExample { + + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static String tableName = "test_mt_table"; + private static Table table; + static final TableName MY_TABLE_NAME = TableName.valueOf(tableName); + private static byte[] familyName = Bytes.toBytes("d"); + private static byte[] columnName = Bytes.toBytes("col"); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMultiThreadedClientExample.class); + + @BeforeClass + public static void setup() throws Exception { + TEST_UTIL.startMiniCluster(1); + table = TEST_UTIL.createTable(MY_TABLE_NAME, familyName); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.deleteTable(MY_TABLE_NAME); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMultiThreadedClientExample() throws Exception { + MultiThreadedClientExample example = new MultiThreadedClientExample(); + example.setConf(TEST_UTIL.getConfiguration()); + String[] args = { tableName, "200" }; + // Define assertions to check the returned data here + assertEquals(0, example.run(args)); + // Define assertions to check the row count of the table + int rows = TEST_UTIL.countRows(table); + assertNotEquals(0, rows); + } +} From 02c63cdec277f9963305eef128023320673162e7 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 12 Sep 2023 08:39:22 -0400 Subject: [PATCH 071/514] HBASE-28008 Add support for netty tcnative (#5363) Signed-off-by: Duo Zhang Reviewed-by: Andor Molnar --- .../hadoop/hbase/io/crypto/tls/X509Util.java | 75 +++++++++++++++++-- 1 file changed, 68 insertions(+), 7 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java index ac910c4d1239..7d16a82b1f3e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java @@ -27,8 +27,11 @@ import java.security.Security; import java.security.cert.PKIXBuilderParameters; import java.security.cert.X509CertSelector; 
+import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import javax.net.ssl.CertPathTrustManagerParameters; import javax.net.ssl.KeyManager; @@ -49,8 +52,10 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ObjectArrays; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.OpenSsl; import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslContext; import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslContextBuilder; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslProvider; /** * Utility code for X509 handling Default cipher suites: Performance testing done by Facebook @@ -83,9 +88,10 @@ public final class X509Util { public static final String TLS_CONFIG_OCSP = CONFIG_PREFIX + "ocsp"; public static final String TLS_CONFIG_REVERSE_DNS_LOOKUP_ENABLED = CONFIG_PREFIX + "host-verification.reverse-dns.enabled"; - private static final String TLS_ENABLED_PROTOCOLS = CONFIG_PREFIX + "enabledProtocols"; - private static final String TLS_CIPHER_SUITES = CONFIG_PREFIX + "ciphersuites"; + public static final String TLS_ENABLED_PROTOCOLS = CONFIG_PREFIX + "enabledProtocols"; + public static final String TLS_CIPHER_SUITES = CONFIG_PREFIX + "ciphersuites"; public static final String TLS_CERT_RELOAD = CONFIG_PREFIX + "certReload"; + public static final String TLS_USE_OPENSSL = CONFIG_PREFIX + "useOpenSsl"; public static final String DEFAULT_PROTOCOL = "TLSv1.2"; // @@ -131,6 +137,34 @@ private static String[] getCBCCiphers() { private static final String[] DEFAULT_CIPHERS_JAVA9 = ObjectArrays.concat(getGCMCiphers(), getCBCCiphers(), String.class); + private static final String[] DEFAULT_CIPHERS_OPENSSL = getOpenSslFilteredDefaultCiphers(); + + /** + * Not all of our default ciphers are available in OpenSSL. Takes our default cipher lists and + * filters them to only those available in OpenSsl. Does GCM first, then CBC because GCM tends to + * be better and faster, and we don't need to worry about the java8 vs 9 performance issue if + * OpenSSL is handling it. + */ + private static String[] getOpenSslFilteredDefaultCiphers() { + if (!OpenSsl.isAvailable()) { + return new String[0]; + } + + Set openSslSuites = OpenSsl.availableJavaCipherSuites(); + List defaultSuites = new ArrayList<>(); + for (String cipher : getGCMCiphers()) { + if (openSslSuites.contains(cipher)) { + defaultSuites.add(cipher); + } + } + for (String cipher : getCBCCiphers()) { + if (openSslSuites.contains(cipher)) { + defaultSuites.add(cipher); + } + } + return defaultSuites.toArray(new String[0]); + } + /** * Enum specifying the client auth requirement of server-side TLS sockets created by this * X509Util. 
@@ -176,7 +210,10 @@ private X509Util() { // disabled } - static String[] getDefaultCipherSuites() { + static String[] getDefaultCipherSuites(boolean useOpenSsl) { + if (useOpenSsl) { + return DEFAULT_CIPHERS_OPENSSL; + } return getDefaultCipherSuitesForJavaVersion(System.getProperty("java.specification.version")); } @@ -202,6 +239,7 @@ public static SslContext createSslContextForClient(Configuration config) SslContextBuilder sslContextBuilder = SslContextBuilder.forClient(); + boolean useOpenSsl = configureOpenSslIfAvailable(sslContextBuilder, config); String keyStoreLocation = config.get(TLS_CONFIG_KEYSTORE_LOCATION, ""); char[] keyStorePassword = config.getPassword(TLS_CONFIG_KEYSTORE_PASSWORD); String keyStoreType = config.get(TLS_CONFIG_KEYSTORE_TYPE, ""); @@ -234,11 +272,33 @@ public static SslContext createSslContextForClient(Configuration config) sslContextBuilder.enableOcsp(sslOcspEnabled); sslContextBuilder.protocols(getEnabledProtocols(config)); - sslContextBuilder.ciphers(Arrays.asList(getCipherSuites(config))); + sslContextBuilder.ciphers(Arrays.asList(getCipherSuites(config, useOpenSsl))); return sslContextBuilder.build(); } + /** + * Adds SslProvider.OPENSSL if OpenSsl is available and enabled. In order to make it available, + * one must ensure that a properly shaded netty-tcnative is on the classpath. Properly shaded + * means relocated to be prefixed with "org.apache.hbase.thirdparty" like the rest of the netty + * classes. + */ + private static boolean configureOpenSslIfAvailable(SslContextBuilder sslContextBuilder, + Configuration conf) { + if (OpenSsl.isAvailable() && conf.getBoolean(TLS_USE_OPENSSL, true)) { + LOG.debug("Using netty-tcnative to accelerate SSL handling"); + sslContextBuilder.sslProvider(SslProvider.OPENSSL); + return true; + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Using default JDK SSL provider because netty-tcnative is not {}", + OpenSsl.isAvailable() ? 
"enabled" : "available"); + } + sslContextBuilder.sslProvider(SslProvider.JDK); + return false; + } + } + public static SslContext createSslContextForServer(Configuration config) throws X509Exception, IOException { String keyStoreLocation = config.get(TLS_CONFIG_KEYSTORE_LOCATION, ""); @@ -254,6 +314,7 @@ public static SslContext createSslContextForServer(Configuration config) sslContextBuilder = SslContextBuilder .forServer(createKeyManager(keyStoreLocation, keyStorePassword, keyStoreType)); + boolean useOpenSsl = configureOpenSslIfAvailable(sslContextBuilder, config); String trustStoreLocation = config.get(TLS_CONFIG_TRUSTSTORE_LOCATION, ""); char[] trustStorePassword = config.getPassword(TLS_CONFIG_TRUSTSTORE_PASSWORD); String trustStoreType = config.get(TLS_CONFIG_TRUSTSTORE_TYPE, ""); @@ -277,7 +338,7 @@ public static SslContext createSslContextForServer(Configuration config) sslContextBuilder.enableOcsp(sslOcspEnabled); sslContextBuilder.protocols(getEnabledProtocols(config)); - sslContextBuilder.ciphers(Arrays.asList(getCipherSuites(config))); + sslContextBuilder.ciphers(Arrays.asList(getCipherSuites(config, useOpenSsl))); sslContextBuilder.clientAuth(clientAuth.toNettyClientAuth()); return sslContextBuilder.build(); @@ -393,10 +454,10 @@ private static String[] getEnabledProtocols(Configuration config) { return enabledProtocolsInput.split(","); } - private static String[] getCipherSuites(Configuration config) { + private static String[] getCipherSuites(Configuration config, boolean useOpenSsl) { String cipherSuitesInput = config.get(TLS_CIPHER_SUITES); if (cipherSuitesInput == null) { - return getDefaultCipherSuites(); + return getDefaultCipherSuites(useOpenSsl); } else { return cipherSuitesInput.split(","); } From bac3198e5996b5ebcca6fcfec75fe3f466b9d38d Mon Sep 17 00:00:00 2001 From: Fantasy-Jay <13631435453@163.com> Date: Wed, 13 Sep 2023 02:16:42 +0800 Subject: [PATCH 072/514] HBASE-27853 Add client side table metrics for rpc calls and request latency. 
(#5228) Signed-off-by: Bryan Beaudreault --- .../client/AsyncBatchRpcRetryingCaller.java | 2 +- .../hbase/client/AsyncConnectionImpl.java | 4 +- .../hbase/client/AsyncRpcRetryingCaller.java | 2 +- ...syncScanSingleRegionRpcRetryingCaller.java | 6 +- .../hadoop/hbase/client/ConnectionUtils.java | 6 +- .../hbase/client/MetricsConnection.java | 70 ++++-- .../hadoop/hbase/ipc/AbstractRpcClient.java | 2 +- .../ipc/DelegatingHBaseRpcController.java | 10 + .../hadoop/hbase/ipc/HBaseRpcController.java | 10 + .../hbase/ipc/HBaseRpcControllerImpl.java | 13 ++ .../hbase/client/TestMetricsConnection.java | 199 +++++++++++++----- .../hbase/client/TestClientTableMetrics.java | 148 +++++++++++++ 12 files changed, 400 insertions(+), 72 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index c485a0a2c05c..4b28d4cd4e2d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -395,7 +395,7 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr } HBaseRpcController controller = conn.rpcControllerFactory.newController(); resetController(controller, Math.min(rpcTimeoutNs, remainingNs), - calcPriority(serverReq.getPriority(), tableName)); + calcPriority(serverReq.getPriority(), tableName), tableName); controller.setRequestAttributes(requestAttributes); if (!cells.isEmpty()) { controller.setCellScanner(createCellScanner(cells)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 4900581c69ad..3f0e3e0b370e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -144,8 +144,8 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri this.connConf = new AsyncConnectionConfiguration(conf); this.registry = registry; if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) { - this.metrics = - Optional.of(MetricsConnection.getMetricsConnection(metricsScope, () -> null, () -> null)); + this.metrics = Optional + .of(MetricsConnection.getMetricsConnection(conf, metricsScope, () -> null, () -> null)); } else { this.metrics = Optional.empty(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index c3dd8740854e..32da6eedd10f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -121,7 +121,7 @@ protected final void resetCallTimeout() { } else { callTimeoutNs = rpcTimeoutNs; } - resetController(controller, callTimeoutNs, priority); + resetController(controller, callTimeoutNs, priority, getTableName().orElse(null)); } private void tryScheduleRetry(Throwable error) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index a5d4ef6407e1..7e3c4340947b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -354,7 +354,7 @@ private long elapsedMs() { private void closeScanner() { incRPCCallsMetrics(scanMetrics, regionServerRemote); - resetController(controller, rpcTimeoutNs, HConstants.HIGH_QOS); + resetController(controller, rpcTimeoutNs, HConstants.HIGH_QOS, loc.getRegion().getTable()); ScanRequest req = RequestConverter.buildScanRequest(this.scannerId, 0, true, false); stub.scan(controller, req, resp -> { if (controller.failed()) { @@ -574,7 +574,7 @@ private void call() { if (tries > 1) { incRPCRetriesMetrics(scanMetrics, regionServerRemote); } - resetController(controller, callTimeoutNs, priority); + resetController(controller, callTimeoutNs, priority, loc.getRegion().getTable()); ScanRequest req = RequestConverter.buildScanRequest(scannerId, scan.getCaching(), false, nextCallSeq, scan.isScanMetricsEnabled(), false, scan.getLimit()); final Context context = Context.current(); @@ -596,7 +596,7 @@ private void next() { private void renewLease() { incRPCCallsMetrics(scanMetrics, regionServerRemote); nextCallSeq++; - resetController(controller, rpcTimeoutNs, priority); + resetController(controller, rpcTimeoutNs, priority, loc.getRegion().getTable()); ScanRequest req = RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq, false, true, -1); stub.scan(controller, req, resp -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 4732da6f04ee..4827708a02e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -196,13 +196,17 @@ static boolean isEmptyStopRow(byte[] row) { return Bytes.equals(row, EMPTY_END_ROW); } - static void resetController(HBaseRpcController controller, long timeoutNs, int priority) { + static void resetController(HBaseRpcController controller, long timeoutNs, int priority, + TableName tableName) { controller.reset(); if (timeoutNs >= 0) { controller.setCallTimeout( (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs))); } controller.setPriority(priority); + if (tableName != null) { + controller.setTableName(tableName); + } } static Throwable translateException(Throwable t) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 8a299dc4e5c1..d4edf018d6d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -34,8 +34,10 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -51,10 +53,10 @@ * This 
class is for maintaining the various connection statistics and publishing them through the * metrics interfaces. This class manages its own {@link MetricRegistry} and {@link JmxReporter} so * as to not conflict with other uses of Yammer Metrics within the client application. Calling - * {@link #getMetricsConnection(String, Supplier, Supplier)} implicitly creates and "starts" - * instances of these classes; be sure to call {@link #deleteMetricsConnection(String)} to terminate - * the thread pools they allocate. The metrics reporter will be shutdown {@link #shutdown()} when - * all connections within this metrics instances are closed. + * {@link #getMetricsConnection(Configuration, String, Supplier, Supplier)} implicitly creates and + * "starts" instances of these classes; be sure to call {@link #deleteMetricsConnection(String)} to + * terminate the thread pools they allocate. The metrics reporter will be shutdown + * {@link #shutdown()} when all connections within this metrics instances are closed. */ @InterfaceAudience.Private public final class MetricsConnection implements StatisticTrackable { @@ -62,11 +64,11 @@ public final class MetricsConnection implements StatisticTrackable { private static final ConcurrentMap METRICS_INSTANCES = new ConcurrentHashMap<>(); - static MetricsConnection getMetricsConnection(final String scope, + static MetricsConnection getMetricsConnection(final Configuration conf, final String scope, Supplier batchPool, Supplier metaPool) { return METRICS_INSTANCES.compute(scope, (s, metricsConnection) -> { if (metricsConnection == null) { - MetricsConnection newMetricsConn = new MetricsConnection(scope, batchPool, metaPool); + MetricsConnection newMetricsConn = new MetricsConnection(conf, scope, batchPool, metaPool); newMetricsConn.incrConnectionCount(); return newMetricsConn; } else { @@ -91,6 +93,10 @@ static void deleteMetricsConnection(final String scope) { /** Set this key to {@code true} to enable metrics collection of client requests. */ public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable"; + /** Set this key to {@code true} to enable table metrics collection of client requests. */ + public static final String CLIENT_SIDE_TABLE_METRICS_ENABLED_KEY = + "hbase.client.table.metrics.enable"; + /** * Set to specify a custom scope for the metrics published through {@link MetricsConnection}. 
The * scope is added to JMX MBean objectName, and defaults to a combination of the Connection's @@ -311,6 +317,7 @@ private static interface NewMetric { private final MetricRegistry registry; private final JmxReporter reporter; private final String scope; + private final boolean tableMetricsEnabled; private final NewMetric timerFactory = new NewMetric() { @Override @@ -374,9 +381,10 @@ public Counter newMetric(Class clazz, String name, String scope) { private final ConcurrentMap rpcCounters = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - private MetricsConnection(String scope, Supplier batchPool, - Supplier metaPool) { + private MetricsConnection(Configuration conf, String scope, + Supplier batchPool, Supplier metaPool) { this.scope = scope; + this.tableMetricsEnabled = conf.getBoolean(CLIENT_SIDE_TABLE_METRICS_ENABLED_KEY, false); addThreadPools(batchPool, metaPool); this.registry = new MetricRegistry(); this.registry.register(getExecutorPoolName(), new RatioGauge() { @@ -506,6 +514,16 @@ public ConcurrentMap getRpcCounters() { return rpcCounters; } + /** rpcTimers metric */ + public ConcurrentMap getRpcTimers() { + return rpcTimers; + } + + /** rpcHistograms metric */ + public ConcurrentMap getRpcHistograms() { + return rpcHistograms; + } + /** getTracker metric */ public CallTracker getGetTracker() { return getTracker; @@ -646,7 +664,8 @@ private void shutdown() { } /** Report RPC context to metrics system. */ - public void updateRpc(MethodDescriptor method, Message param, CallStats stats, Throwable e) { + public void updateRpc(MethodDescriptor method, TableName tableName, Message param, + CallStats stats, Throwable e) { int callsPerServer = stats.getConcurrentCallsPerServer(); if (callsPerServer > 0) { concurrentCallsPerServerHist.update(callsPerServer); @@ -696,6 +715,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T case 0: assert "Get".equals(method.getName()); getTracker.updateRpc(stats); + updateTableMetric(methodName.toString(), tableName, stats, e); return; case 1: assert "Mutate".equals(method.getName()); @@ -703,22 +723,25 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T switch (mutationType) { case APPEND: appendTracker.updateRpc(stats); - return; + break; case DELETE: deleteTracker.updateRpc(stats); - return; + break; case INCREMENT: incrementTracker.updateRpc(stats); - return; + break; case PUT: putTracker.updateRpc(stats); - return; + break; default: throw new RuntimeException("Unrecognized mutation type " + mutationType); } + updateTableMetric(methodName.toString(), tableName, stats, e); + return; case 2: assert "Scan".equals(method.getName()); scanTracker.updateRpc(stats); + updateTableMetric(methodName.toString(), tableName, stats, e); return; case 3: assert "BulkLoadHFile".equals(method.getName()); @@ -744,6 +767,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T assert "Multi".equals(method.getName()); numActionsPerServerHist.update(stats.getNumActionsPerServer()); multiTracker.updateRpc(stats); + updateTableMetric(methodName.toString(), tableName, stats, e); return; default: throw new RuntimeException("Unrecognized ClientService RPC type " + method.getFullName()); @@ -753,6 +777,26 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats, T updateRpcGeneric(methodName.toString(), stats); } + /** Report table rpc context to metrics system. 
*/ + private void updateTableMetric(String methodName, TableName tableName, CallStats stats, + Throwable e) { + if (tableMetricsEnabled) { + if (methodName != null) { + String table = tableName != null && StringUtils.isNotEmpty(tableName.getNameAsString()) + ? tableName.getNameAsString() + : "unknown"; + String metricKey = methodName + "_" + table; + // update table rpc context to metrics system, + // includes rpc call duration, rpc call request/response size(bytes). + updateRpcGeneric(metricKey, stats); + if (e != null) { + // rpc failure call counter with table name. + getMetric(FAILURE_CNT_BASE + metricKey, rpcCounters, counterFactory).inc(); + } + } + } + } + public void incrCacheDroppingExceptions(Object exception) { getMetric( CACHE_BASE + (exception == null ? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 5e42558671b7..fcded9f5b69d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -379,7 +379,7 @@ private void onCallFinished(Call call, HBaseRpcController hrc, Address addr, RpcCallback callback) { call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.getStartTime()); if (metrics != null) { - metrics.updateRpc(call.md, call.param, call.callStats, call.error); + metrics.updateRpc(call.md, hrc.getTableName(), call.param, call.callStats, call.error); } if (LOG.isTraceEnabled()) { LOG.trace("CallId: {}, call: {}, startTime: {}ms, callTime: {}ms, status: {}", call.id, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java index c752f4c18355..2b8839bf8462 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java @@ -143,4 +143,14 @@ public void notifyOnCancel(RpcCallback callback, CancellationCallback ac throws IOException { delegate.notifyOnCancel(callback, action); } + + @Override + public void setTableName(TableName tableName) { + delegate.setTableName(tableName); + } + + @Override + public TableName getTableName() { + return delegate.getTableName(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index cd303a5eda77..4d3e038bb5ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -130,4 +130,14 @@ default boolean hasRegionInfo() { default RegionInfo getRegionInfo() { return null; } + + /** Sets Region's table name. */ + default void setTableName(TableName tableName) { + + } + + /** Returns Region's table name or null if not available or pertinent. 
*/ + default TableName getTableName() { + return null; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java index 425c5e77afcd..54e9310b5ae7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java @@ -51,6 +51,8 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { private IOException exception; + private TableName tableName; + /** * Rpc target Region's RegionInfo we are going against. May be null. * @see #hasRegionInfo() @@ -144,6 +146,7 @@ public void reset() { exception = null; callTimeout = null; regionInfo = null; + tableName = null; // In the implementations of some callable with replicas, rpc calls are executed in a executor // and we could cancel the operation from outside which means there could be a race between // reset and startCancel. Although I think the race should be handled by the callable since the @@ -273,4 +276,14 @@ public synchronized void notifyOnCancel(RpcCallback callback, Cancellati action.run(false); } } + + @Override + public void setTableName(TableName tableName) { + this.tableName = tableName; + } + + @Override + public TableName getTableName() { + return tableName; + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index 2afdc7ee558d..e0d18f6bbb7e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -18,19 +18,23 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import com.codahale.metrics.Counter; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.RatioGauge.Ratio; +import com.codahale.metrics.Timer; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Optional; import java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.CallTimeoutException; import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.security.User; @@ -38,15 +42,20 @@ import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; @@ -56,25 +65,37 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +@RunWith(Parameterized.class) @Category({ ClientTests.class, MetricsTests.class, SmallTests.class }) public class TestMetricsConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsConnection.class); + private static final Configuration conf = new Configuration(); private static MetricsConnection METRICS; private static final ThreadPoolExecutor BATCH_POOL = (ThreadPoolExecutor) Executors.newFixedThreadPool(2); private static final String MOCK_CONN_STR = "mocked-connection"; - @BeforeClass - public static void beforeClass() { - METRICS = MetricsConnection.getMetricsConnection(MOCK_CONN_STR, () -> BATCH_POOL, () -> null); + @Parameter() + public boolean tableMetricsEnabled; + + @Parameters + public static List params() { + return Arrays.asList(false, true); + } + + @Before + public void before() { + conf.setBoolean(MetricsConnection.CLIENT_SIDE_TABLE_METRICS_ENABLED_KEY, tableMetricsEnabled); + METRICS = + MetricsConnection.getMetricsConnection(conf, MOCK_CONN_STR, () -> BATCH_POOL, () -> null); } - @AfterClass - public static void afterClass() { + @After + public void after() { MetricsConnection.deleteMetricsConnection(MOCK_CONN_STR); } @@ -146,35 +167,52 @@ public void testMetricsWithMultiConnections() throws IOException { @Test public void testStaticMetrics() throws IOException { final byte[] foo = Bytes.toBytes("foo"); - final RegionSpecifier region = RegionSpecifier.newBuilder().setValue(ByteString.EMPTY) - .setType(RegionSpecifierType.REGION_NAME).build(); + String table = "TableX"; + final RegionSpecifier region = RegionSpecifier.newBuilder() + .setValue(ByteString.copyFromUtf8(table)).setType(RegionSpecifierType.REGION_NAME).build(); final int loop = 5; for (int i = 0; i < loop; i++) { METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Get"), - GetRequest.getDefaultInstance(), MetricsConnection.newCallStats(), null); + TableName.valueOf(table), + GetRequest.newBuilder().setRegion(region).setGet(ProtobufUtil.toGet(new Get(foo))).build(), + MetricsConnection.newCallStats(), null); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Scan"), - ScanRequest.getDefaultInstance(), MetricsConnection.newCallStats(), + TableName.valueOf(table), + ScanRequest.newBuilder().setRegion(region) + .setScan(ProtobufUtil.toScan(new Scan(new Get(foo)))).build(), + MetricsConnection.newCallStats(), new RemoteWithExtrasException("java.io.IOException", null, false, false)); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Multi"), - MultiRequest.getDefaultInstance(), MetricsConnection.newCallStats(), + TableName.valueOf(table), + MultiRequest.newBuilder() + .addRegionAction(ClientProtos.RegionAction.newBuilder() + .addAction( + ClientProtos.Action.newBuilder().setGet(ProtobufUtil.toGet(new Get(foo))).build()) + .setRegion(region).build()) + .build(), + MetricsConnection.newCallStats(), new CallTimeoutException("test with CallTimeoutException")); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + TableName.valueOf(table), MutateRequest.newBuilder() .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new 
Append(foo))) .setRegion(region).build(), MetricsConnection.newCallStats(), null); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + TableName.valueOf(table), MutateRequest.newBuilder() .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) .setRegion(region).build(), MetricsConnection.newCallStats(), null); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + TableName.valueOf(table), MutateRequest.newBuilder() .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) .setRegion(region).build(), MetricsConnection.newCallStats(), null); METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + TableName.valueOf(table), MutateRequest.newBuilder() .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) .build(), @@ -182,48 +220,12 @@ public void testStaticMetrics() throws IOException { new CallTimeoutException("test with CallTimeoutException")); } - final String rpcCountPrefix = "rpcCount_" + ClientService.getDescriptor().getName() + "_"; - final String rpcFailureCountPrefix = - "rpcFailureCount_" + ClientService.getDescriptor().getName() + "_"; + testRpcCallMetrics(table, loop); + String metricKey; long metricVal; Counter counter; - for (String method : new String[] { "Get", "Scan", "Multi" }) { - metricKey = rpcCountPrefix + method; - metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); - assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); - - metricKey = rpcFailureCountPrefix + method; - counter = METRICS.getRpcCounters().get(metricKey); - metricVal = (counter != null) ? counter.getCount() : 0; - if (method.equals("Get")) { - // no failure - assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); - } else { - // has failure - assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); - } - } - - String method = "Mutate"; - for (String mutationType : new String[] { "Append", "Delete", "Increment", "Put" }) { - metricKey = rpcCountPrefix + method + "(" + mutationType + ")"; - metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); - assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); - - metricKey = rpcFailureCountPrefix + method + "(" + mutationType + ")"; - counter = METRICS.getRpcCounters().get(metricKey); - metricVal = (counter != null) ? counter.getCount() : 0; - if (mutationType.equals("Put")) { - // has failure - assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop); - } else { - // no failure - assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); - } - } - // remote exception metricKey = "rpcRemoteExceptions_IOException"; counter = METRICS.getRpcCounters().get(metricKey); @@ -242,6 +244,8 @@ public void testStaticMetrics() throws IOException { metricVal = (counter != null) ? 
counter.getCount() : 0; assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, loop * 3); + testRpcCallTableMetrics(table, loop); + for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getGetTracker(), METRICS.getScanTracker(), METRICS.getMultiTracker(), METRICS.getAppendTracker(), METRICS.getDeleteTracker(), METRICS.getIncrementTracker(), @@ -257,4 +261,99 @@ public void testStaticMetrics() throws IOException { assertEquals(Ratio.of(0, 3).getValue(), executorMetrics.getValue(), 0); assertEquals(Double.NaN, metaMetrics.getValue(), 0); } + + private void testRpcCallTableMetrics(String table, int expectedVal) { + String metricKey; + Timer timer; + String numOpsSuffix = "_num_ops"; + String p95Suffix = "_95th_percentile"; + String p99Suffix = "_99th_percentile"; + String service = ClientService.getDescriptor().getName(); + for (String m : new String[] { "Get", "Scan", "Multi" }) { + metricKey = "rpcCallDurationMs_" + service + "_" + m + "_" + table; + timer = METRICS.getRpcTimers().get(metricKey); + if (tableMetricsEnabled) { + long numOps = timer.getCount(); + double p95 = timer.getSnapshot().get95thPercentile(); + double p99 = timer.getSnapshot().get99thPercentile(); + assertEquals("metric: " + metricKey + numOpsSuffix + " val: " + numOps, expectedVal, + numOps); + assertTrue("metric: " + metricKey + p95Suffix + " val: " + p95, p95 >= 0); + assertTrue("metric: " + metricKey + p99Suffix + " val: " + p99, p99 >= 0); + } else { + assertNull(timer); + } + } + + // Distinguish mutate types for mutate method. + String mutateMethod = "Mutate"; + for (String mutationType : new String[] { "Append", "Delete", "Increment", "Put" }) { + metricKey = "rpcCallDurationMs_" + service + "_" + mutateMethod + "(" + mutationType + ")" + + "_" + table; + timer = METRICS.getRpcTimers().get(metricKey); + if (tableMetricsEnabled) { + long numOps = timer.getCount(); + double p95 = timer.getSnapshot().get95thPercentile(); + double p99 = timer.getSnapshot().get99thPercentile(); + assertEquals("metric: " + metricKey + numOpsSuffix + " val: " + numOps, expectedVal, + numOps); + assertTrue("metric: " + metricKey + p95Suffix + " val: " + p95, p95 >= 0); + assertTrue("metric: " + metricKey + p99Suffix + " val: " + p99, p99 >= 0); + } else { + assertNull(timer); + } + } + } + + private void testRpcCallMetrics(String table, int expectedVal) { + final String rpcCountPrefix = "rpcCount_" + ClientService.getDescriptor().getName() + "_"; + final String rpcFailureCountPrefix = + "rpcFailureCount_" + ClientService.getDescriptor().getName() + "_"; + String metricKey; + long metricVal; + Counter counter; + + for (String method : new String[] { "Get", "Scan", "Multi" }) { + // rpc call count + metricKey = rpcCountPrefix + method; + metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, expectedVal); + + // rpc failure call + metricKey = tableMetricsEnabled + ? rpcFailureCountPrefix + method + "_" + table + : rpcFailureCountPrefix + method; + counter = METRICS.getRpcCounters().get(metricKey); + metricVal = (counter != null) ? 
counter.getCount() : 0; + if (method.equals("Get")) { + // no failure + assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); + } else { + // has failure + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, expectedVal); + } + } + + String method = "Mutate"; + for (String mutationType : new String[] { "Append", "Delete", "Increment", "Put" }) { + // rpc call count + metricKey = rpcCountPrefix + method + "(" + mutationType + ")"; + metricVal = METRICS.getRpcCounters().get(metricKey).getCount(); + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, expectedVal); + + // rpc failure call + metricKey = tableMetricsEnabled + ? rpcFailureCountPrefix + method + "(" + mutationType + ")" + "_" + table + : rpcFailureCountPrefix + method + "(" + mutationType + ")"; + counter = METRICS.getRpcCounters().get(metricKey); + metricVal = (counter != null) ? counter.getCount() : 0; + if (mutationType.equals("Put")) { + // has failure + assertEquals("metric: " + metricKey + " val: " + metricVal, metricVal, expectedVal); + } else { + // no failure + assertEquals("metric: " + metricKey + " val: " + metricVal, 0, metricVal); + } + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java new file mode 100644 index 000000000000..c0980c51256a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.codahale.metrics.Timer; +import java.io.IOException; +import java.util.Arrays; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; + +@Category(MediumTests.class) +public class TestClientTableMetrics { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestClientTableMetrics.class); + + private static HBaseTestingUtil UTIL; + private static Connection CONN; + private static MetricsConnection METRICS; + private static final String tableName = "table_1"; + private static final TableName TABLE_1 = TableName.valueOf(tableName); + private static final byte[] FAMILY = Bytes.toBytes("f"); + + @BeforeClass + public static void beforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true); + conf.setBoolean(MetricsConnection.CLIENT_SIDE_TABLE_METRICS_ENABLED_KEY, true); + UTIL = new HBaseTestingUtil(conf); + UTIL.startMiniCluster(2); + UTIL.createTable(TABLE_1, FAMILY); + UTIL.waitTableAvailable(TABLE_1); + CONN = UTIL.getConnection(); + METRICS = ((AsyncConnectionImpl) CONN.toAsyncConnection()).getConnectionMetrics().get(); + } + + @AfterClass + public static void afterClass() throws Exception { + UTIL.deleteTableIfAny(TABLE_1); + UTIL.shutdownMiniCluster(); + } + + @Test + public void testGetTableMetrics() throws IOException { + Table table = CONN.getTable(TABLE_1); + table.get(new Get(Bytes.toBytes("row1"))); + table.get(new Get(Bytes.toBytes("row2"))); + table.get(new Get(Bytes.toBytes("row3"))); + table.close(); + + String metricKey = + "rpcCallDurationMs_" + ClientService.getDescriptor().getName() + "_Get_" + tableName; + verifyTableMetrics(metricKey, 3); + } + + @Test + public void testMutateTableMetrics() throws IOException { + Table table = CONN.getTable(TABLE_1); + // PUT + Put put = new Put(Bytes.toBytes("row1")); + put.addColumn(FAMILY, Bytes.toBytes("name"), Bytes.toBytes("tom")); + table.put(put); + put = new Put(Bytes.toBytes("row2")); + put.addColumn(FAMILY, Bytes.toBytes("name"), Bytes.toBytes("jerry")); + table.put(put); + // DELETE + table.delete(new Delete(Bytes.toBytes("row1"))); + table.close(); + + String metricKey = + "rpcCallDurationMs_" + ClientService.getDescriptor().getName() + "_Mutate(Put)_" + tableName; + verifyTableMetrics(metricKey, 2); + + metricKey = "rpcCallDurationMs_" + ClientService.getDescriptor().getName() + "_Mutate(Delete)_" + + tableName; + verifyTableMetrics(metricKey, 1); + } + + @Test + public void testScanTableMetrics() throws IOException { + Table table = CONN.getTable(TABLE_1); + table.getScanner(new Scan()); + table.close(); + + String metricKey = + "rpcCallDurationMs_" + ClientService.getDescriptor().getName() + "_Scan_" + tableName; + verifyTableMetrics(metricKey, 1); + } + + @Test + public void 
testMultiTableMetrics() throws IOException { + Table table = CONN.getTable(TABLE_1); + table.put(Arrays.asList( + new Put(Bytes.toBytes("row1")).addColumn(FAMILY, Bytes.toBytes("name"), Bytes.toBytes("tom")), + new Put(Bytes.toBytes("row2")).addColumn(FAMILY, Bytes.toBytes("name"), + Bytes.toBytes("jerry")))); + table.get(Arrays.asList(new Get(Bytes.toBytes("row1")), new Get(Bytes.toBytes("row2")))); + table.close(); + + String metricKey = + "rpcCallDurationMs_" + ClientService.getDescriptor().getName() + "_Multi_" + tableName; + verifyTableMetrics(metricKey, 2); + } + + private static void verifyTableMetrics(String metricKey, int expectedVal) { + String numOpsSuffix = "_num_ops"; + String p95Suffix = "_95th_percentile"; + String p99Suffix = "_99th_percentile"; + Timer timer = METRICS.getRpcTimers().get(metricKey); + long numOps = timer.getCount(); + double p95 = timer.getSnapshot().get95thPercentile(); + double p99 = timer.getSnapshot().get99thPercentile(); + assertEquals("metric: " + metricKey + numOpsSuffix + " val: " + numOps, expectedVal, numOps); + assertTrue("metric: " + metricKey + p95Suffix + " val: " + p95, p95 >= 0); + assertTrue("metric: " + metricKey + p99Suffix + " val: " + p99, p99 >= 0); + } +} From 800d8b374c3a1e74e383fa5eaab0b6c9d270a7cc Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Wed, 13 Sep 2023 11:06:53 +0800 Subject: [PATCH 073/514] HBASE-28080 correct span name in AbstractRpcBasedConnectionRegistry#getActiveMaster (#5400) Co-authored-by: huiruan <876107431@qq.com> Signed-off-by: Duo Zhang --- .../hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 6dd14a520ee4..4e97dcab24dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -268,7 +268,7 @@ public CompletableFuture getActiveMaster() { (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), GetActiveMasterResponse::hasServerName, "getActiveMaster()") .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), - getClass().getSimpleName() + ".getClusterId"); + getClass().getSimpleName() + ".getActiveMaster"); } @Override From 35667c1fa4ad53f86b18d1c51c9109be247a9f0e Mon Sep 17 00:00:00 2001 From: hiping-tech <58875741+hiping-tech@users.noreply.github.com> Date: Wed, 13 Sep 2023 18:28:42 +0800 Subject: [PATCH 074/514] =?UTF-8?q?HBASE-28058=20Adjust=20the=20order=20of?= =?UTF-8?q?=20acquiring=20the=20takeSnapshot=20locks=20to=20r=E2=80=A6esol?= =?UTF-8?q?ve=20the=20deadlock=20issue.=20(#5381)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: lvhaiping.lhp Signed-off-by: Duo Zhang Signed-off-by: Pankaj Kumar (cherry picked from commit d43e6820c092e1e4a52e8fd1c2d601017b1240c9) --- .../hadoop/hbase/master/snapshot/SnapshotManager.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 2b1e6a31f92d..3c421dd8bd01 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -670,7 +670,7 @@ public void takeSnapshot(SnapshotDescription snapshot) throws IOException { } } - public synchronized long takeSnapshot(SnapshotDescription snapshot, long nonceGroup, long nonce) + public long takeSnapshot(SnapshotDescription snapshot, long nonceGroup, long nonce) throws IOException { this.takingSnapshotLock.readLock().lock(); try { @@ -680,8 +680,8 @@ public synchronized long takeSnapshot(SnapshotDescription snapshot, long nonceGr } } - private long submitSnapshotProcedure(SnapshotDescription snapshot, long nonceGroup, long nonce) - throws IOException { + private synchronized long submitSnapshotProcedure(SnapshotDescription snapshot, long nonceGroup, + long nonce) throws IOException { return MasterProcedureUtil .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(master, nonceGroup, nonce) { @Override From 84ccae368f6ed94a751d576c59fc695c683f1c8b Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Wed, 13 Sep 2023 11:12:52 -0400 Subject: [PATCH 075/514] HBASE-28079 Unhandled TableExistsException and NamespaceExistException in BackupSystemTable (#5399) Signed-off-by: Duo Zhang --- .../hbase/backup/impl/BackupSystemTable.java | 26 ++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 04f43b5b0ea1..55f225f41cf1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceExistException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; @@ -202,17 +204,28 @@ private void checkSystemTable() throws IOException { Configuration conf = connection.getConfiguration(); if (!admin.tableExists(tableName)) { TableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(conf); - admin.createTable(backupHTD); + createSystemTable(admin, backupHTD); } if (!admin.tableExists(bulkLoadTableName)) { TableDescriptor blHTD = BackupSystemTable.getSystemTableForBulkLoadedDataDescriptor(conf); - admin.createTable(blHTD); + createSystemTable(admin, blHTD); } waitForSystemTable(admin, tableName); waitForSystemTable(admin, bulkLoadTableName); } } + private void createSystemTable(Admin admin, TableDescriptor descriptor) throws IOException { + try { + admin.createTable(descriptor); + } catch (TableExistsException e) { + // swallow because this class is initialized in concurrent environments (i.e. 
bulkloads), + // so may be subject to race conditions where one caller succeeds in creating the + // table and others fail because it now exists + LOG.debug("Table {} already exists, ignoring", descriptor.getTableName(), e); + } + } + private void verifyNamespaceExists(Admin admin) throws IOException { String namespaceName = tableName.getNamespaceAsString(); NamespaceDescriptor ns = NamespaceDescriptor.create(namespaceName).build(); @@ -225,7 +238,14 @@ private void verifyNamespaceExists(Admin admin) throws IOException { } } if (!exists) { - admin.createNamespace(ns); + try { + admin.createNamespace(ns); + } catch (NamespaceExistException e) { + // swallow because this class is initialized in concurrent environments (i.e. bulkloads), + // so may be subject to race conditions where one caller succeeds in creating the + // namespace and others fail because it now exists + LOG.debug("Namespace {} already exists, ignoring", ns.getName(), e); + } } } From 2892208648f1ed52cc5599ed57c26a525eb20e96 Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Fri, 15 Sep 2023 16:18:44 +0800 Subject: [PATCH 076/514] add huiruan to the developer list (#5404) Co-authored-by: huiruan <876107431@qq.com> --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index e31b56d0e530..10286b31512f 100644 --- a/pom.xml +++ b/pom.xml @@ -244,6 +244,12 @@ huaxiangsun@apache.org -8 + + huiruan + Hui Ruan + huiruan@apache.org + +8 + jdcryans Jean-Daniel Cryans From ff2c10c1c254eafb2440f3f3f57968dfafc62d2f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 15 Sep 2023 18:56:19 +0800 Subject: [PATCH 077/514] HBASE-28061 HBaseTestingUtility failed to start MiniHbaseCluster in case of Hadoop3.3.1 (#5401) Co-authored-by: Butao Zhang Signed-off-by: Xin Sun --- .../asyncfs/FanOutOneBlockAsyncDFSOutput.java | 3 +- .../FanOutOneBlockAsyncDFSOutputHelper.java | 5 +- .../hadoop/hbase/util/LocatedBlockHelper.java | 57 +++++++++++++++++++ .../apache/hadoop/hbase/fs/HFileSystem.java | 4 +- .../org/apache/hadoop/hbase/util/FSUtils.java | 3 +- .../hadoop/hbase/fs/TestBlockReorder.java | 6 +- .../fs/TestBlockReorderBlockLocation.java | 29 +++++----- .../hbase/fs/TestBlockReorderMultiBlocks.java | 21 ++++--- .../hadoop/hbase/tool/TestBulkLoadHFiles.java | 3 +- 9 files changed, 101 insertions(+), 30 deletions(-) create mode 100644 hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/LocatedBlockHelper.java diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index 5febcc8daa19..55a2f6c86ae7 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; import static org.apache.hadoop.hbase.util.NettyFutureUtils.consume; import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWrite; import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWriteAndFlush; @@ -364,7 
+365,7 @@ private void setupReceiver(int timeoutMs) { this.clientName = clientName; this.src = src; this.block = locatedBlock.getBlock(); - this.locations = locatedBlock.getLocations(); + this.locations = getLocatedBlockLocations(locatedBlock); this.encryptor = encryptor; this.datanodeInfoMap = datanodeInfoMap; this.summer = summer; diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 9c66c53b8bfe..98590173ed2a 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; import static org.apache.hadoop.hbase.util.NettyFutureUtils.addListener; import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeClose; import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWriteAndFlush; @@ -383,7 +384,7 @@ private static List> connectToDataNodes(Configuration conf, DFSC BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, Class channelClass) { StorageType[] storageTypes = locatedBlock.getStorageTypes(); - DatanodeInfo[] datanodeInfos = locatedBlock.getLocations(); + DatanodeInfo[] datanodeInfos = getLocatedBlockLocations(locatedBlock); boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); @@ -495,7 +496,7 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass); for (int i = 0, n = futureList.size(); i < n; i++) { - DatanodeInfo datanodeInfo = locatedBlock.getLocations()[i]; + DatanodeInfo datanodeInfo = getLocatedBlockLocations(locatedBlock)[i]; try { datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo); } catch (Exception e) { diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/LocatedBlockHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/LocatedBlockHelper.java new file mode 100644 index 000000000000..932bce2b1613 --- /dev/null +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/LocatedBlockHelper.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * hadoop 3.3.1 changed the return value of this method from {@code DatanodeInfo[]} to + * {@code DatanodeInfoWithStorage[]}, which causes the JVM can not locate the method if we are + * compiled with hadoop 3.2 and then link with hadoop 3.3+, so here we need to use reflection to + * make it work for both hadoop versions, otherwise we need to publish more artifacts for different + * hadoop versions... + */ +@InterfaceAudience.Private +public final class LocatedBlockHelper { + + private static final Method GET_LOCATED_BLOCK_LOCATIONS_METHOD; + + static { + try { + GET_LOCATED_BLOCK_LOCATIONS_METHOD = LocatedBlock.class.getMethod("getLocations"); + } catch (Exception e) { + throw new Error("Can not initialize access to HDFS LocatedBlock.getLocations method", e); + } + } + + private LocatedBlockHelper() { + } + + public static DatanodeInfo[] getLocatedBlockLocations(LocatedBlock block) { + try { + // DatanodeInfoWithStorage[] can be casted to DatanodeInfo[] directly + return (DatanodeInfo[]) GET_LOCATED_BLOCK_LOCATIONS_METHOD.invoke(block); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException(e); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index ac90d2c686c2..f893e6d73c44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.fs; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; + import edu.umd.cs.findbugs.annotations.Nullable; import java.io.Closeable; import java.io.IOException; @@ -425,7 +427,7 @@ public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) // Just check for all blocks for (LocatedBlock lb : lbs.getLocatedBlocks()) { - DatanodeInfo[] dnis = lb.getLocations(); + DatanodeInfo[] dnis = getLocatedBlockLocations(lb); if (dnis != null && dnis.length > 1) { boolean found = false; for (int i = 0; i < dnis.length - 1 && !found; i++) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index e84e9bf3a81f..0c61a1b27030 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.util; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET; import edu.umd.cs.findbugs.annotations.CheckForNull; @@ -691,7 +692,7 @@ public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOExc } private static String[] getHostsForLocations(LocatedBlock block) { - DatanodeInfo[] locations = block.getLocations(); + DatanodeInfo[] locations = getLocatedBlockLocations(block); String[] hosts = new String[locations.length]; for (int i = 0; i < 
hosts.length; i++) { hosts[i] = locations[i].getHostName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java index c5f891caaa03..98fb53c32d14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.fs; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; + import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.BindException; @@ -160,8 +162,8 @@ public void testBlockLocationReorder() throws Exception { @Override public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) { for (LocatedBlock lb : lbs.getLocatedBlocks()) { - if (lb.getLocations().length > 1) { - DatanodeInfo[] infos = lb.getLocations(); + if (getLocatedBlockLocations(lb).length > 1) { + DatanodeInfo[] infos = getLocatedBlockLocations(lb); if (infos[0].getHostName().equals(lookup)) { LOG.info("HFileSystem bad host, inverting"); DatanodeInfo tmp = infos[0]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java index 64c91dc6e109..bfcdcdfe6f72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.fs; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; + import java.lang.reflect.Field; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -117,21 +119,22 @@ public void testBlockLocation() throws Exception { for (int i = 0; i < 10; i++) { // The interceptor is not set in this test, so we get the raw list at this point - LocatedBlocks l; + LocatedBlocks lbs; final long max = EnvironmentEdgeManager.currentTime() + 10000; do { - l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1); - Assert.assertNotNull(l.getLocatedBlocks()); - Assert.assertEquals(1, l.getLocatedBlocks().size()); - Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length, + lbs = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1); + Assert.assertNotNull(lbs.getLocatedBlocks()); + Assert.assertEquals(1, lbs.getLocatedBlocks().size()); + Assert.assertTrue( + "Expecting " + repCount + " , got " + getLocatedBlockLocations(lbs.get(0)).length, EnvironmentEdgeManager.currentTime() < max); - } while (l.get(0).getLocations().length != repCount); + } while (getLocatedBlockLocations(lbs.get(0)).length != repCount); // Should be filtered, the name is different => The order won't change - Object originalList[] = l.getLocatedBlocks().toArray(); + Object[] originalList = lbs.getLocatedBlocks().toArray(); HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks(); - lrb.reorderBlocks(conf, l, fileName); - Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray()); + lrb.reorderBlocks(conf, lbs, fileName); + Assert.assertArrayEquals(originalList, lbs.getLocatedBlocks().toArray()); // Should be reordered, as we pretend to be a file name with a compliant stuff Assert.assertNotNull(conf.get(HConstants.HBASE_DIR)); @@ -144,12 
+147,12 @@ public void testBlockLocation() throws Exception { AbstractFSWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile)); // And check we're doing the right reorder. - lrb.reorderBlocks(conf, l, pseudoLogFile); - Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName()); + lrb.reorderBlocks(conf, lbs, pseudoLogFile); + Assert.assertEquals(host1, getLocatedBlockLocations(lbs.get(0))[2].getHostName()); // Check again, it should remain the same. - lrb.reorderBlocks(conf, l, pseudoLogFile); - Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName()); + lrb.reorderBlocks(conf, lbs, pseudoLogFile); + Assert.assertEquals(host1, getLocatedBlockLocations(lbs.get(0))[2].getHostName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java index fece1d809970..b5bfc9ef86be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.fs; +import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Field; @@ -245,25 +247,26 @@ private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, St throws Exception { // Multiple times as the order is random for (int i = 0; i < 10; i++) { - LocatedBlocks l; + LocatedBlocks lbs; // The NN gets the block list asynchronously, so we may need multiple tries to get the list final long max = EnvironmentEdgeManager.currentTime() + 10000; boolean done; do { Assert.assertTrue("Can't get enouth replica", EnvironmentEdgeManager.currentTime() < max); - l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1); - Assert.assertNotNull("Can't get block locations for " + src, l); - Assert.assertNotNull(l.getLocatedBlocks()); - Assert.assertTrue(l.getLocatedBlocks().size() > 0); + lbs = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1); + Assert.assertNotNull("Can't get block locations for " + src, lbs); + Assert.assertNotNull(lbs.getLocatedBlocks()); + Assert.assertTrue(lbs.getLocatedBlocks().size() > 0); done = true; - for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) { - done = (l.get(y).getLocations().length == repCount); + for (int y = 0; y < lbs.getLocatedBlocks().size() && done; y++) { + done = getLocatedBlockLocations(lbs.get(y)).length == repCount; } } while (!done); - for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) { - Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName()); + for (int y = 0; y < lbs.getLocatedBlocks().size() && done; y++) { + Assert.assertEquals(localhost, + getLocatedBlockLocations(lbs.get(y))[repCount - 1].getHostName()); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java index c6cbb6458c53..7561645f70b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.tool; import static org.apache.hadoop.hbase.HBaseTestingUtil.countRows; +import static 
org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations; import static org.hamcrest.Matchers.greaterThan; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -731,7 +732,7 @@ private void verifyHFileFavoriteNode(Path p, AsyncTableRegionLocator regionLocat isFavoriteNode = false; final LocatedBlock block = locatedBlocks.get(index); - final DatanodeInfo[] locations = block.getLocations(); + final DatanodeInfo[] locations = getLocatedBlockLocations(block); for (DatanodeInfo location : locations) { final String hostName = location.getHostName(); From a80b341b6eda48a4c3584c27ccb835275777d94f Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Sat, 16 Sep 2023 20:34:10 +0530 Subject: [PATCH 078/514] HBASE-28066 Drop duplicate test class TestShellRSGroups.java (#5387) Signed-off-by: Duo Zhang --- .../client/rsgroup/TestShellRSGroups.java | 104 ------------------ 1 file changed, 104 deletions(-) delete mode 100644 hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java deleted file mode 100644 index 380ad6163228..000000000000 --- a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client.rsgroup; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint; -import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; -import org.apache.hadoop.hbase.security.access.SecureTestUtil; -import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.jruby.embed.PathType; -import org.jruby.embed.ScriptingContainer; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -//Separate Shell test class for Groups -//Since we need to use a different balancer and run more than 1 RS -@Category({ClientTests.class, LargeTests.class}) -public class TestShellRSGroups { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestShellRSGroups.class); - - final Logger LOG = LoggerFactory.getLogger(getClass()); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static ScriptingContainer jruby = new ScriptingContainer(); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - - // Start mini cluster - TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); - TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); - TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); - TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - // Security setup configuration - SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); - VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); - - //Setup RegionServer Groups - TEST_UTIL.getConfiguration().set( - HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); - TEST_UTIL.getConfiguration().set( - CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - RSGroupAdminEndpoint.class.getName()); - - TEST_UTIL.startMiniCluster(4); - - // Configure jruby runtime - List loadPaths = new ArrayList<>(2); - loadPaths.add("src/test/ruby"); - jruby.setLoadPaths(loadPaths); - jruby.put("$TEST_CLUSTER", TEST_UTIL); - System.setProperty("jruby.jit.logging.verbose", "true"); - System.setProperty("jruby.jit.logging", "true"); - System.setProperty("jruby.native.verbose", "true"); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testRunShellTests() throws IOException { - try { - // Start only GroupShellTest - System.setProperty("shell.test", "Hbase::RSGroupShellTest"); - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); - } finally { - System.clearProperty("shell.test"); - } - } -} From ef7b85409a6e92647ec3094d617a0b8654e19824 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 17 Sep 2023 17:08:53 +0800 Subject: [PATCH 079/514] HBASE-28087 Add hadoop 3.3.6 in hadoopcheck (#5405) Signed-off-by: Nihal Jain --- 
dev-support/hbase-personality.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 48eb55e3eaf1..67aa2d1d168f 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -598,16 +598,16 @@ function hadoopcheck_rebuild if [[ "${PATCH_BRANCH}" = branch-2.4 ]]; then yetus_info "Setting Hadoop 3 versions to test based on branch-2.4 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.4 3.2.4 3.3.5" + hbase_hadoop3_versions="3.1.4 3.2.4 3.3.6" else - hbase_hadoop3_versions="3.1.1 3.1.2 3.1.3 3.1.4 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.3.0 3.3.1 3.3.2 3.3.3 3.3.4 3.3.5" + hbase_hadoop3_versions="3.1.1 3.1.2 3.1.3 3.1.4 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.3.0 3.3.1 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.5+/master/feature branch rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.2.4 3.3.5" + hbase_hadoop3_versions="3.2.4 3.3.6" else - hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5" + hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" fi fi From 787d524d55c794d0703bcc3c15b4538ba96803e9 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 19 Sep 2023 07:54:11 +0800 Subject: [PATCH 080/514] HBASE-28090 Make entryReader field final in ReplicationSourceShipper class (#5409) Signed-off-by: Wellington Chevreuil --- .../RecoveredReplicationSource.java | 5 +++-- .../RecoveredReplicationSourceShipper.java | 4 ++-- .../regionserver/ReplicationSource.java | 14 ++++++------- .../ReplicationSourceShipper.java | 21 +++++++------------ .../regionserver/TestReplicationSource.java | 3 +-- 5 files changed, 20 insertions(+), 27 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index e740a01dc4f7..e9062472221c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -27,8 +27,9 @@ public class RecoveredReplicationSource extends ReplicationSource { @Override - protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId) { - return new RecoveredReplicationSourceShipper(conf, walGroupId, logQueue, this, queueStorage, + protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, + ReplicationSourceWALReader walReader) { + return new RecoveredReplicationSourceShipper(conf, walGroupId, this, walReader, queueStorage, () -> { if (workerThreads.isEmpty()) { this.getSourceMetrics().clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java index 2bb3a7c3591c..ece566d96006 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java @@ -30,9 +30,9 @@ public class RecoveredReplicationSourceShipper extends ReplicationSourceShipper private final Runnable tryFinish; public 
RecoveredReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, RecoveredReplicationSource source, + RecoveredReplicationSource source, ReplicationSourceWALReader walReader, ReplicationQueueStorage queueStorage, Runnable tryFinish) { - super(conf, walGroupId, logQueue, source); + super(conf, walGroupId, source, walReader); this.tryFinish = tryFinish; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index e4da44e9b13a..00be66c5c0fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -367,14 +367,13 @@ private void tryStartNewShipper(String walGroupId) { return value; } else { LOG.debug("{} starting shipping worker for walGroupId={}", logPeerId(), walGroupId); - ReplicationSourceShipper worker = createNewShipper(walGroupId); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, getStartOffset(walGroupId)); + ReplicationSourceShipper worker = createNewShipper(walGroupId, walReader); Threads.setDaemonThreadRunning( walReader, Thread.currentThread().getName() + ".replicationSource.wal-reader." + walGroupId + "," + queueId, (t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); - worker.setWALReader(walReader); worker.startup((t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); return worker; } @@ -428,8 +427,9 @@ private long getFileSize(Path currentPath) throws IOException { return fileSize; } - protected ReplicationSourceShipper createNewShipper(String walGroupId) { - return new ReplicationSourceShipper(conf, walGroupId, logQueue, this); + protected ReplicationSourceShipper createNewShipper(String walGroupId, + ReplicationSourceWALReader walReader) { + return new ReplicationSourceShipper(conf, walGroupId, this, walReader); } private ReplicationSourceWALReader createNewWALReader(String walGroupId, long startPosition) { @@ -665,7 +665,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { + private void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { @@ -684,9 +684,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, bool for (ReplicationSourceShipper worker : workers) { worker.stopWorker(); - if (worker.entryReader != null) { - worker.entryReader.setReaderRunning(false); - } + worker.entryReader.setReaderRunning(false); } if (this.replicationEndpoint != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 7b863dc35ae9..6d0730d76b6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -55,9 +55,8 @@ public enum WorkerState { } private final Configuration conf; - protected 
final String walGroupId; - protected final ReplicationSourceLogQueue logQueue; - protected final ReplicationSource source; + final String walGroupId; + private final ReplicationSource source; // Last position in the log that we sent to ZooKeeper // It will be accessed by the stats thread so make it volatile @@ -66,22 +65,22 @@ public enum WorkerState { private Path currentPath; // Current state of the worker thread private volatile WorkerState state; - protected ReplicationSourceWALReader entryReader; + final ReplicationSourceWALReader entryReader; // How long should we sleep for each retry - protected final long sleepForRetries; + private final long sleepForRetries; // Maximum number of retries before taking bold actions - protected final int maxRetriesMultiplier; + private final int maxRetriesMultiplier; private final int DEFAULT_TIMEOUT = 20000; private final int getEntriesTimeout; private final int shipEditsTimeout; - public ReplicationSourceShipper(Configuration conf, String walGroupId, - ReplicationSourceLogQueue logQueue, ReplicationSource source) { + public ReplicationSourceShipper(Configuration conf, String walGroupId, ReplicationSource source, + ReplicationSourceWALReader walReader) { this.conf = conf; this.walGroupId = walGroupId; - this.logQueue = logQueue; this.source = source; + this.entryReader = walReader; // 1 second this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); // 5 minutes @ 1 sec per @@ -295,10 +294,6 @@ long getCurrentPosition() { return currentPosition; } - void setWALReader(ReplicationSourceWALReader entryReader) { - this.entryReader = entryReader; - } - protected boolean isActive() { return source.isSourceActive() && state == WorkerState.RUNNING && !isInterrupted(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 707bab875d22..53996c376647 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -291,8 +291,7 @@ public void testTerminateClearsBuffer() throws Exception { mock(MetricsSource.class)); ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null, conf, null, 0, null, source, null); - ReplicationSourceShipper shipper = new ReplicationSourceShipper(conf, null, null, source); - shipper.entryReader = reader; + ReplicationSourceShipper shipper = new ReplicationSourceShipper(conf, null, source, reader); source.workerThreads.put("testPeer", shipper); WALEntryBatch batch = new WALEntryBatch(10, logDir); WAL.Entry mockEntry = mock(WAL.Entry.class); From 94e4055fd1deea94bb7041da563744808b404fe5 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Tue, 19 Sep 2023 10:16:33 -0400 Subject: [PATCH 081/514] HBASE-27981 Add connection and request attributes to slow log (#5335) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/client/OnlineLogRecord.java | 55 +++++++++++++-- .../hbase/shaded/protobuf/ProtobufUtil.java | 30 +++++++- .../hbase/client/TestOnlineLogRecord.java | 56 ++++++++++++++- .../protobuf/server/region/TooSlowLog.proto | 4 ++ .../hbase/namequeues/RpcLogDetails.java | 20 +++++- .../namequeues/impl/SlowLogQueueService.java | 18 ++++- .../regionserver/rsOperationDetails.jsp | 9 +++ .../namequeues/TestNamedQueueRecorder.java | 70 ++++++++++++++++++- 8 files 
changed, 250 insertions(+), 12 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index 65e2f58f4529..d9fd51e80a95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Map; import java.util.Optional; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -29,6 +30,8 @@ import org.apache.hbase.thirdparty.com.google.gson.JsonObject; import org.apache.hbase.thirdparty.com.google.gson.JsonSerializer; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + /** * Slow/Large Log payload for hbase-client, to be used by Admin API get_slow_responses and * get_large_responses @@ -53,6 +56,18 @@ final public class OnlineLogRecord extends LogEntry { if (slowLogPayload.getMultiServiceCalls() == 0) { jsonObj.remove("multiServiceCalls"); } + if (slowLogPayload.getRequestAttributes().isEmpty()) { + jsonObj.remove("requestAttributes"); + } else { + jsonObj.add("requestAttributes", gson + .toJsonTree(ProtobufUtil.deserializeAttributes(slowLogPayload.getRequestAttributes()))); + } + if (slowLogPayload.getConnectionAttributes().isEmpty()) { + jsonObj.remove("connectionAttributes"); + } else { + jsonObj.add("connectionAttributes", gson.toJsonTree( + ProtobufUtil.deserializeAttributes(slowLogPayload.getConnectionAttributes()))); + } if (slowLogPayload.getScan().isPresent()) { jsonObj.add("scan", gson.toJsonTree(slowLogPayload.getScan().get().toMap())); } else { @@ -79,6 +94,8 @@ final public class OnlineLogRecord extends LogEntry { private final int multiMutationsCount; private final int multiServiceCalls; private final Optional scan; + private final Map requestAttributes; + private final Map connectionAttributes; public long getStartTime() { return startTime; @@ -152,11 +169,20 @@ public Optional getScan() { return scan; } + public Map getRequestAttributes() { + return requestAttributes; + } + + public Map getConnectionAttributes() { + return connectionAttributes; + } + OnlineLogRecord(final long startTime, final int processingTime, final int queueTime, final long responseSize, final long blockBytesScanned, final String clientAddress, final String serverClass, final String methodName, final String callDetails, final String param, final String regionName, final String userName, final int multiGetsCount, - final int multiMutationsCount, final int multiServiceCalls, final Scan scan) { + final int multiMutationsCount, final int multiServiceCalls, final Scan scan, + final Map requestAttributes, final Map connectionAttributes) { this.startTime = startTime; this.processingTime = processingTime; this.queueTime = queueTime; @@ -173,6 +199,8 @@ public Optional getScan() { this.multiMutationsCount = multiMutationsCount; this.multiServiceCalls = multiServiceCalls; this.scan = Optional.ofNullable(scan); + this.requestAttributes = requestAttributes; + this.connectionAttributes = connectionAttributes; } public static class OnlineLogRecordBuilder { @@ -192,6 +220,8 @@ public static class OnlineLogRecordBuilder { private int multiMutationsCount; private int multiServiceCalls; private Scan scan = null; + private Map requestAttributes; + private Map connectionAttributes; public OnlineLogRecordBuilder setStartTime(long startTime) { 
this.startTime = startTime; @@ -276,10 +306,22 @@ public OnlineLogRecordBuilder setScan(Scan scan) { return this; } + public OnlineLogRecordBuilder setRequestAttributes(Map requestAttributes) { + this.requestAttributes = requestAttributes; + return this; + } + + public OnlineLogRecordBuilder + setConnectionAttributes(Map connectionAttributes) { + this.connectionAttributes = connectionAttributes; + return this; + } + public OnlineLogRecord build() { return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, blockBytesScanned, clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls, scan); + userName, multiGetsCount, multiMutationsCount, multiServiceCalls, scan, requestAttributes, + connectionAttributes); } } @@ -304,7 +346,8 @@ public boolean equals(Object o) { .append(serverClass, that.serverClass).append(methodName, that.methodName) .append(callDetails, that.callDetails).append(param, that.param) .append(regionName, that.regionName).append(userName, that.userName).append(scan, that.scan) - .isEquals(); + .append(requestAttributes, that.requestAttributes) + .append(connectionAttributes, that.connectionAttributes).isEquals(); } @Override @@ -313,7 +356,7 @@ public int hashCode() { .append(responseSize).append(blockBytesScanned).append(clientAddress).append(serverClass) .append(methodName).append(callDetails).append(param).append(regionName).append(userName) .append(multiGetsCount).append(multiMutationsCount).append(multiServiceCalls).append(scan) - .toHashCode(); + .append(requestAttributes).append(connectionAttributes).toHashCode(); } @Override @@ -330,7 +373,9 @@ public String toString() { .append("methodName", methodName).append("callDetails", callDetails).append("param", param) .append("regionName", regionName).append("userName", userName) .append("multiGetsCount", multiGetsCount).append("multiMutationsCount", multiMutationsCount) - .append("multiServiceCalls", multiServiceCalls).append("scan", scan).toString(); + .append("multiServiceCalls", multiServiceCalls).append("scan", scan) + .append("requestAttributes", requestAttributes) + .append("connectionAttributes", connectionAttributes).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index aa3cb39c5971..c14a0d042823 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2196,6 +2196,25 @@ public static SlowLogParams getSlowLogParams(Message message, boolean slowLogSca return new SlowLogParams(params); } + /** + * Convert a list of NameBytesPair to a more readable CSV + */ + public static String convertAttributesToCsv(List attributes) { + if (attributes.isEmpty()) { + return HConstants.EMPTY_STRING; + } + return deserializeAttributes(convertNameBytesPairsToMap(attributes)).entrySet().stream() + .map(entry -> entry.getKey() + " = " + entry.getValue()).collect(Collectors.joining(", ")); + } + + /** + * Convert a map of byte array attributes to a more readable map of binary string representations + */ + public static Map deserializeAttributes(Map attributes) { + return attributes.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, entry -> Bytes.toStringBinary(entry.getValue()))); + } + /** * Print out some subset of a MutationProto rather 
than all of it and its data * @param proto Protobuf to print out @@ -3389,7 +3408,10 @@ private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLog .setResponseSize(slowLogPayload.getResponseSize()) .setBlockBytesScanned(slowLogPayload.getBlockBytesScanned()) .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) - .setUserName(slowLogPayload.getUserName()); + .setUserName(slowLogPayload.getUserName()) + .setRequestAttributes(convertNameBytesPairsToMap(slowLogPayload.getRequestAttributeList())) + .setConnectionAttributes( + convertNameBytesPairsToMap(slowLogPayload.getConnectionAttributeList())); if (slowLogPayload.hasScan()) { try { onlineLogRecord.setScan(ProtobufUtil.toScan(slowLogPayload.getScan())); @@ -3400,6 +3422,12 @@ private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLog return onlineLogRecord.build(); } + private static Map + convertNameBytesPairsToMap(List nameBytesPairs) { + return nameBytesPairs.stream().collect(Collectors.toMap(NameBytesPair::getName, + nameBytesPair -> nameBytesPair.getValue().toByteArray())); + } + /** * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} * @param logEntry slowlog response protobuf instance diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java index 846738d82987..fe753973ae20 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -26,6 +29,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; + @Category({ ClientTests.class, SmallTests.class }) public class TestOnlineLogRecord { @@ -47,10 +53,56 @@ public void itSerializesScan() { + " \"maxResultSize\": -1,\n" + " \"families\": {},\n" + " \"caching\": -1,\n" + " \"maxVersions\": 1,\n" + " \"timeRange\": [\n" + " 0,\n" + " 9223372036854775807\n" + " ]\n" + " }\n" + "}"; - OnlineLogRecord o = - new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, 6, 7, 0, scan); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, + 6, 7, 0, scan, Collections.emptyMap(), Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); Assert.assertEquals(actualOutput, expectedOutput); } + + @Test + public void itSerializesRequestAttributes() { + Map requestAttributes = ImmutableMap. builder() + .put("r", Bytes.toBytes("1")).put("2", Bytes.toBytes(0.0)).build(); + Set expectedOutputs = + ImmutableSet. 
builder().add("requestAttributes").add("\"r\": \"1\"") + .add("\"2\": \"\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\"").build(); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, + 6, 7, 0, null, requestAttributes, Collections.emptyMap()); + String actualOutput = o.toJsonPrettyPrint(); + System.out.println(actualOutput); + expectedOutputs.forEach(expected -> Assert.assertTrue(actualOutput.contains(expected))); + } + + @Test + public void itOmitsEmptyRequestAttributes() { + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, + 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); + String actualOutput = o.toJsonPrettyPrint(); + System.out.println(actualOutput); + Assert.assertFalse(actualOutput.contains("requestAttributes")); + } + + @Test + public void itSerializesConnectionAttributes() { + Map connectionAttributes = ImmutableMap. builder() + .put("c", Bytes.toBytes("1")).put("2", Bytes.toBytes(0.0)).build(); + Set expectedOutputs = + ImmutableSet. builder().add("connectionAttributes").add("\"c\": \"1\"") + .add("\"2\": \"\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\"").build(); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, + 6, 7, 0, null, Collections.emptyMap(), connectionAttributes); + String actualOutput = o.toJsonPrettyPrint(); + System.out.println(actualOutput); + expectedOutputs.forEach(expected -> Assert.assertTrue(actualOutput.contains(expected))); + } + + @Test + public void itOmitsEmptyConnectionAttributes() { + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, + 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); + String actualOutput = o.toJsonPrettyPrint(); + System.out.println(actualOutput); + Assert.assertFalse(actualOutput.contains("connectionAttributes")); + } } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto index d0abdd1af75a..4c275948b277 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto @@ -27,6 +27,7 @@ option java_outer_classname = "TooSlowLog"; option java_generate_equals_and_hash = true; option optimize_for = SPEED; +import "HBase.proto"; import "client/Client.proto"; message SlowLogPayload { @@ -49,6 +50,9 @@ message SlowLogPayload { optional int64 block_bytes_scanned = 16; optional Scan scan = 17; + repeated NameBytesPair connection_attribute = 18; + repeated NameBytesPair request_attribute = 19; + // SLOW_LOG is RPC call slow in nature whereas LARGE_LOG is RPC call quite large. 
// Majority of times, slow logs are also large logs and hence, ALL is combination of // both diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java index c0baf21e4340..eb35d886bbb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.namequeues; +import java.util.Map; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.yetus.audience.InterfaceAudience; @@ -39,6 +40,8 @@ public class RpcLogDetails extends NamedQueuePayload { private final String className; private final boolean isSlowLog; private final boolean isLargeLog; + private final Map connectionAttributes; + private final Map requestAttributes; public RpcLogDetails(RpcCall rpcCall, Message param, String clientAddress, long responseSize, long blockBytesScanned, String className, boolean isSlowLog, boolean isLargeLog) { @@ -51,6 +54,12 @@ public RpcLogDetails(RpcCall rpcCall, Message param, String clientAddress, long this.className = className; this.isSlowLog = isSlowLog; this.isLargeLog = isLargeLog; + + // it's important to call getConnectionAttributes and getRequestAttributes here + // because otherwise the buffers may get released before the log details are processed which + // would result in corrupted attributes + this.connectionAttributes = rpcCall.getConnectionAttributes(); + this.requestAttributes = rpcCall.getRequestAttributes(); } public RpcCall getRpcCall() { @@ -85,11 +94,20 @@ public Message getParam() { return param; } + public Map getConnectionAttributes() { + return connectionAttributes; + } + + public Map getRequestAttributes() { + return requestAttributes; + } + @Override public String toString() { return new ToStringBuilder(this).append("rpcCall", rpcCall).append("param", param) .append("clientAddress", clientAddress).append("responseSize", responseSize) .append("className", className).append("isSlowLog", isSlowLog) - .append("isLargeLog", isLargeLog).toString(); + .append("isLargeLog", isLargeLog).append("connectionAttributes", connectionAttributes) + .append("requestAttributes", requestAttributes).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java index 48121a8b066a..fb29b8563ef7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hbase.namequeues.impl; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Queue; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -42,12 +44,14 @@ import org.apache.hbase.thirdparty.com.google.common.collect.EvictingQueue; import org.apache.hbase.thirdparty.com.google.common.collect.Queues; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog; /** @@ -164,7 +168,9 @@ public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) { .setProcessingTime(processingTime).setQueueTime(qTime) .setRegionName(slowLogParams != null ? slowLogParams.getRegionName() : StringUtils.EMPTY) .setResponseSize(responseSize).setBlockBytesScanned(blockBytesScanned) - .setServerClass(className).setStartTime(startTime).setType(type).setUserName(userName); + .setServerClass(className).setStartTime(startTime).setType(type).setUserName(userName) + .addAllRequestAttribute(buildNameBytesPairs(rpcLogDetails.getRequestAttributes())) + .addAllConnectionAttribute(buildNameBytesPairs(rpcLogDetails.getConnectionAttributes())); if (slowLogParams != null && slowLogParams.getScan() != null) { slowLogPayloadBuilder.setScan(slowLogParams.getScan()); } @@ -177,6 +183,16 @@ public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) { } } + private static Collection + buildNameBytesPairs(Map attributes) { + if (attributes == null) { + return Collections.emptySet(); + } + return attributes.entrySet().stream().map(attr -> HBaseProtos.NameBytesPair.newBuilder() + .setName(attr.getKey()).setValue(ByteString.copyFrom(attr.getValue())).build()) + .collect(Collectors.toSet()); + } + @Override public boolean clearNamedQueue() { if (!isOnlineLogProviderEnabled) { diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/rsOperationDetails.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/rsOperationDetails.jsp index a1ff23143bad..e8944b63f435 100644 --- a/hbase-server/src/main/resources/hbase-webapps/regionserver/rsOperationDetails.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/rsOperationDetails.jsp @@ -26,6 +26,7 @@ import="org.apache.hadoop.hbase.regionserver.HRegionServer" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog" + import="org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil" import="org.apache.hadoop.hbase.namequeues.NamedQueueRecorder" import="org.apache.hadoop.hbase.namequeues.RpcLogDetails" import="org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest" @@ -108,6 +109,8 @@ MultiService Calls Call Details Param + Request Attributes + Connection Attributes <% if (slowLogs != null && !slowLogs.isEmpty()) {%> <% for (TooSlowLog.SlowLogPayload r : slowLogs) { %> @@ -127,6 +130,8 @@ <%=r.getMultiServiceCalls()%> <%=r.getCallDetails()%> <%=r.getParam()%> + <%=ProtobufUtil.convertAttributesToCsv(r.getRequestAttributeList())%> + <%=ProtobufUtil.convertAttributesToCsv(r.getConnectionAttributeList())%> <% } %> <% } %> @@ -151,6 +156,8 @@ MultiService Calls Call Details Param + Request Attributes + Connection Attributes <% if (largeLogs != null && !largeLogs.isEmpty()) {%> <% for (TooSlowLog.SlowLogPayload r : largeLogs) { %> @@ -170,6 +177,8 @@ <%=r.getMultiServiceCalls()%> <%=r.getCallDetails()%> <%=r.getParam()%> + <%=ProtobufUtil.convertAttributesToCsv(r.getRequestAttributeList())%> + <%=ProtobufUtil.convertAttributesToCsv(r.getConnectionAttributeList())%> <% } %> <% } %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index c24b364a2277..af6c51260fd5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -28,6 +28,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -47,6 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; @@ -72,6 +74,20 @@ public class TestNamedQueueRecorder { private static final Logger LOG = LoggerFactory.getLogger(TestNamedQueueRecorder.class); private static final HBaseTestingUtil HBASE_TESTING_UTILITY = new HBaseTestingUtil(); + private static final List REQUEST_HEADERS = + ImmutableList. builder() + .add(HBaseProtos.NameBytesPair.newBuilder().setName("1") + .setValue(ByteString.copyFromUtf8("r")).build()) + .add(HBaseProtos.NameBytesPair.newBuilder().setName("2") + .setValue(ByteString.copyFromUtf8("h")).build()) + .build(); + private static final List CONNECTION_HEADERS = + ImmutableList. builder() + .add(HBaseProtos.NameBytesPair.newBuilder().setName("1") + .setValue(ByteString.copyFromUtf8("c")).build()) + .add(HBaseProtos.NameBytesPair.newBuilder().setName("2") + .setValue(ByteString.copyFromUtf8("h")).build()) + .build(); private NamedQueueRecorder namedQueueRecorder; @@ -600,6 +616,54 @@ public void testOnlineSlowLogScanPayloadExplicitlyEnabled() throws Exception { })); } + @Test + public void testOnlineSlowLogRequestAttributes() throws Exception { + Configuration conf = applySlowLogRecorderConf(1); + Constructor constructor = + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + constructor.setAccessible(true); + namedQueueRecorder = constructor.newInstance(conf); + AdminProtos.SlowLogResponseRequest request = + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(1).build(); + + Assert.assertEquals(getSlowLogPayloads(request).size(), 0); + LOG.debug("Initially ringbuffer of Slow Log records is empty"); + RpcLogDetails rpcLogDetails = getRpcLogDetailsOfScan(); + namedQueueRecorder.addRecord(rpcLogDetails); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + Optional slowLogPayload = getSlowLogPayloads(request).stream().findAny(); + if (slowLogPayload.isPresent() && !slowLogPayload.get().getRequestAttributeList().isEmpty()) { + return slowLogPayload.get().getRequestAttributeList().containsAll(REQUEST_HEADERS); + } + return false; + })); + } + + @Test + public void testOnlineSlowLogConnectionAttributes() throws Exception { + Configuration conf = applySlowLogRecorderConf(1); + Constructor constructor = + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + constructor.setAccessible(true); + namedQueueRecorder = constructor.newInstance(conf); + AdminProtos.SlowLogResponseRequest request = + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(1).build(); + + Assert.assertEquals(getSlowLogPayloads(request).size(), 0); + 
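For orientation, a client-side sketch (not part of this patch) of how the new request/connection attribute getters on OnlineLogRecord could be consumed; it assumes the existing Admin.getLogEntries slow-log API and a caller-supplied server set.

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.LogEntry;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.ServerType;
import org.apache.hadoop.hbase.util.Bytes;

public class SlowLogAttributeReaderSketch {
  // Fetches recent slow-log records and prints the attribute maps added by HBASE-27981.
  static void printSlowLogAttributes(Admin admin, Set<ServerName> servers) throws Exception {
    List<LogEntry> entries =
      admin.getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 10, null);
    for (LogEntry entry : entries) {
      OnlineLogRecord record = (OnlineLogRecord) entry;
      for (Map.Entry<String, byte[]> attr : record.getRequestAttributes().entrySet()) {
        System.out.println("request: " + attr.getKey() + " = " + Bytes.toStringBinary(attr.getValue()));
      }
      // Connection-level attributes are exposed the same way.
      record.getConnectionAttributes()
        .forEach((k, v) -> System.out.println("connection: " + k + " = " + Bytes.toStringBinary(v)));
    }
  }
}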
LOG.debug("Initially ringbuffer of Slow Log records is empty"); + RpcLogDetails rpcLogDetails = getRpcLogDetailsOfScan(); + namedQueueRecorder.addRecord(rpcLogDetails); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + Optional slowLogPayload = getSlowLogPayloads(request).stream().findAny(); + if ( + slowLogPayload.isPresent() && !slowLogPayload.get().getConnectionAttributeList().isEmpty() + ) { + return slowLogPayload.get().getConnectionAttributeList().containsAll(CONNECTION_HEADERS); + } + return false; + })); + } + static RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className, int forcedParamIndex) { RpcCall rpcCall = getRpcCall(userName, forcedParamIndex); @@ -697,12 +761,14 @@ public RPCProtos.RequestHeader getHeader() { @Override public Map getConnectionAttributes() { - return null; + return CONNECTION_HEADERS.stream().collect(Collectors + .toMap(HBaseProtos.NameBytesPair::getName, pair -> pair.getValue().toByteArray())); } @Override public Map getRequestAttributes() { - return null; + return REQUEST_HEADERS.stream().collect(Collectors.toMap(HBaseProtos.NameBytesPair::getName, + pair -> pair.getValue().toByteArray())); } @Override From 8b2ca86d508f5047ddeeb65c21e3f141ae571f6b Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Wed, 20 Sep 2023 13:14:50 +0530 Subject: [PATCH 082/514] HBASE-28089 Upgrade BouncyCastle to fix CVE-2023-33201 (#5407) - Upgrades to v1.76, i.e. the latest version - Replaces *-jdk15on with *-jdk18on - Excludes *-jdk15on from everywhere else, to avoid conflicts with *-jdk18on Signed-off-by: Duo Zhang Reviewed-by: Aman Poonia --- hbase-asyncfs/pom.xml | 2 +- hbase-common/pom.xml | 4 +-- hbase-endpoint/pom.xml | 2 +- hbase-examples/pom.xml | 2 +- hbase-http/pom.xml | 14 ++++++++- hbase-mapreduce/pom.xml | 2 +- .../main/resources/supplemental-models.xml | 4 +-- hbase-rest/pom.xml | 2 +- hbase-server/pom.xml | 4 +-- pom.xml | 31 +++++++++++++++++-- 10 files changed, 52 insertions(+), 15 deletions(-) diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index ace6dbe4bf20..08d619e46cbb 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -75,7 +75,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 45f5c358a90c..fc3136e05558 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -154,12 +154,12 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on test diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index 59a625b4bbc5..e024fcae002e 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -101,7 +101,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 84bf9bb826f6..1a5ca5bd09aa 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -152,7 +152,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index eeaa83cbf8b5..f229270a47de 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -107,7 +107,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test @@ -170,12 +170,24 @@ apacheds-core ${apacheds.version} test + + + org.bouncycastle + bcprov-jdk15on + + org.apache.directory.server apacheds-protocol-ldap ${apacheds.version} test + + + org.bouncycastle + bcprov-jdk15on + + org.apache.directory.server diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 
68e88141d62d..3d9877dbf787 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -213,7 +213,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml index dd60a7ddc1f3..b7204d71acc8 100644 --- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml +++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml @@ -586,10 +586,10 @@ under the License. org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on - + MIT License http://www.opensource.org/licenses/mit-license.php diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 8c356ef518ba..0368e713bdc2 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -230,7 +230,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index f9b01535185e..2455c199cb2b 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -331,12 +331,12 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on test org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on test diff --git a/pom.xml b/pom.xml index 10286b31512f..e43a27c6afee 100644 --- a/pom.xml +++ b/pom.xml @@ -856,7 +856,7 @@ 2.1.43 1.0.57 2.12.2 - 1.70 + 1.76 1.5.1 1.0.1 1.1.0 @@ -1621,7 +1621,7 @@ org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on ${bouncycastle.version} test @@ -1633,7 +1633,7 @@ org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on ${bouncycastle.version} test @@ -2399,6 +2399,23 @@ + + banned-bouncycastle-jdk15on + + enforce + + + + + + org.bouncycastle:*-jdk15on + + Use org.bouncycastle:*-jdk18on instead + true + + + + check-aggregate-license @@ -4070,6 +4087,14 @@ org.slf4j slf4j-reload4j + + org.bouncycastle + bcprov-jdk15on + + + org.bouncycastle + bcpkix-jdk15on + From 93d90bf64d6dfbdfbdd712f2748857ae282a3014 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 20 Sep 2023 21:39:16 +0800 Subject: [PATCH 083/514] HBASE-28101 Should check the return value of protobuf Message.mergeDelimitedFrom (#5413) Signed-off-by: GeorryHuang --- .../apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 700093a30274..34869314bab0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import io.opentelemetry.context.Scope; +import java.io.EOFException; import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -168,7 +169,12 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep Message value; if (call.responseDefaultType != null) { Message.Builder builder = call.responseDefaultType.newBuilderForType(); - builder.mergeDelimitedFrom(in); + if (!builder.mergeDelimitedFrom(in)) { + // The javadoc of mergeDelimitedFrom says returning false means the stream reaches EOF + // before reading any bytes out, so here we need to manually throw the EOFException out + throw new EOFException( + "EOF while reading response with type: " + call.responseDefaultType.getClass().getName()); + } value = builder.build(); } else { value = null; From 36888e3762bf0a406ebb2ce60d6decbf865a76d3 Mon Sep 17 00:00:00 
2001 From: Rahul Kumar Date: Wed, 20 Sep 2023 20:47:37 +0530 Subject: [PATCH 084/514] =?UTF-8?q?HBASE-28068=20Add=20hbase.normalizer.me?= =?UTF-8?q?rge.merge=5Frequest=5Fmax=5Fnumber=5Fof=5Fregions=20property=20?= =?UTF-8?q?=E2=80=A6=20(#5403)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Authored-by: Rahul Kumar Signed-off-by: Nick Dimiduk --- .../src/main/resources/hbase-default.xml | 6 ++ .../normalizer/SimpleRegionNormalizer.java | 55 ++++++++++++++----- .../TestSimpleRegionNormalizer.java | 36 ++++++++++++ 3 files changed, 84 insertions(+), 13 deletions(-) diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index d2ecef6bda3d..61eb4a0059fa 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -656,6 +656,12 @@ possible configurations would overwhelm and obscure the important. The minimum size for a region to be considered for a merge, in whole MBs. + + hbase.normalizer.merge.merge_request_max_number_of_regions + 50 + The maximum number of region count in a merge request for merge + normalization. + hbase.table.normalization.enabled false diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index dfae394b75a6..934b8de1b94c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -81,6 +81,9 @@ class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver static final int DEFAULT_MERGE_MIN_REGION_AGE_DAYS = 3; static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 0; + static final String MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY = + "hbase.normalizer.merge.merge_request_max_number_of_regions"; + static final long DEFAULT_MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT = 100; private MasterServices masterServices; private NormalizerConfiguration normalizerConfiguration; @@ -138,6 +141,16 @@ private static long parseMergeMinRegionSizeMb(final Configuration conf) { return settledValue; } + private static long parseMergeRequestMaxNumberOfRegionsCount(final Configuration conf) { + final long parsedValue = conf.getLong(MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY, + DEFAULT_MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT); + final long settledValue = Math.max(2, parsedValue); + if (parsedValue != settledValue) { + warnInvalidValue(MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY, parsedValue, settledValue); + } + return settledValue; + } + private static void warnInvalidValue(final String key, final T parsedValue, final T settledValue) { LOG.warn("Configured value {}={} is invalid. 
Setting value to {}.", key, parsedValue, @@ -186,6 +199,10 @@ public long getMergeMinRegionSizeMb() { return normalizerConfiguration.getMergeMinRegionSizeMb(); } + public long getMergeRequestMaxNumberOfRegionsCount() { + return normalizerConfiguration.getMergeRequestMaxNumberOfRegionsCount(); + } + @Override public void setMasterServices(final MasterServices masterServices) { this.masterServices = masterServices; @@ -382,19 +399,21 @@ private List computeMergeNormalizationPlans(final NormalizeCo break; } if ( - rangeMembers.isEmpty() // when there are no range members, seed the range with whatever - // we have. this way we're prepared in case the next region is - // 0-size. - || (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0) // when there is only one - // region and the size is 0, - // seed the range with - // whatever we have. - || regionSizeMb == 0 // always add an empty region to the current range. - || (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb) - ) { // add the current region - // to the range when - // there's capacity - // remaining. + // when there are no range members, seed the range with whatever we have. this way we're + // prepared in case the next region is 0-size. + rangeMembers.isEmpty() + // when there is only one region and the size is 0, seed the range with whatever we + // have. + || (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0) + // add an empty region to the current range only if it doesn't exceed max merge request + // region count + || (regionSizeMb == 0 && rangeMembers.size() < getMergeRequestMaxNumberOfRegionsCount()) + // add region if current range region size is less than avg region size of table + // and current range doesn't exceed max merge request region count + || ((regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb) + && (rangeMembers.size() < getMergeRequestMaxNumberOfRegionsCount())) + ) { + // add the current region to the range when there's capacity remaining. 
rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb)); sumRangeMembersSizeMb += regionSizeMb; continue; @@ -502,6 +521,7 @@ private static final class NormalizerConfiguration { private final int mergeMinRegionCount; private final Period mergeMinRegionAge; private final long mergeMinRegionSizeMb; + private final long mergeRequestMaxNumberOfRegionsCount; private final long cumulativePlansSizeLimitMb; private NormalizerConfiguration() { @@ -511,6 +531,7 @@ private NormalizerConfiguration() { mergeMinRegionCount = DEFAULT_MERGE_MIN_REGION_COUNT; mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + mergeRequestMaxNumberOfRegionsCount = DEFAULT_MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT; cumulativePlansSizeLimitMb = DEFAULT_CUMULATIVE_SIZE_LIMIT_MB; } @@ -522,6 +543,7 @@ private NormalizerConfiguration(final Configuration conf, mergeMinRegionCount = parseMergeMinRegionCount(conf); mergeMinRegionAge = parseMergeMinRegionAge(conf); mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + mergeRequestMaxNumberOfRegionsCount = parseMergeRequestMaxNumberOfRegionsCount(conf); cumulativePlansSizeLimitMb = conf.getLong(CUMULATIVE_SIZE_LIMIT_MB_KEY, DEFAULT_CUMULATIVE_SIZE_LIMIT_MB); logConfigurationUpdated(SPLIT_ENABLED_KEY, currentConfiguration.isSplitEnabled(), @@ -534,6 +556,9 @@ private NormalizerConfiguration(final Configuration conf, currentConfiguration.getMergeMinRegionAge(), mergeMinRegionAge); logConfigurationUpdated(MERGE_MIN_REGION_SIZE_MB_KEY, currentConfiguration.getMergeMinRegionSizeMb(), mergeMinRegionSizeMb); + logConfigurationUpdated(MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY, + currentConfiguration.getMergeRequestMaxNumberOfRegionsCount(), + mergeRequestMaxNumberOfRegionsCount); } public Configuration getConf() { @@ -597,6 +622,10 @@ public long getMergeMinRegionSizeMb(NormalizeContext context) { return mergeMinRegionSizeMb; } + public long getMergeRequestMaxNumberOfRegionsCount() { + return mergeRequestMaxNumberOfRegionsCount; + } + private long getCumulativePlansSizeLimitMb() { return cumulativePlansSizeLimitMb; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 5dba036bb705..902205c74636 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MERGE_MIN_REGION_AGE_DAYS_KEY; import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MERGE_MIN_REGION_COUNT_KEY; import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MERGE_MIN_REGION_SIZE_MB_KEY; +import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY; import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MIN_REGION_COUNT_KEY; import static org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.SPLIT_ENABLED_KEY; import static org.hamcrest.MatcherAssert.assertThat; @@ -503,6 +504,41 @@ public void testHonorsMergeMinRegionSizeInTD() { assertThat(normalizer.computePlansForTable(tableDescriptor), empty()); } + @Test + public void 
testHonorsMergeRequestMaxNumberOfRegionsCount() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_COUNT_KEY, 1); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + conf.setInt(MERGE_REQUEST_MAX_NUMBER_OF_REGIONS_COUNT_KEY, 3); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 5); + final Map regionSizes = createRegionSizesMap(regionInfos, 0, 1, 0, 1, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + assertEquals(3, normalizer.getMergeRequestMaxNumberOfRegionsCount()); + List plans = normalizer.computePlansForTable(tableDescriptor); + assertThat(plans, + contains( + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1).addTarget(regionInfos.get(2), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(3), 1) + .addTarget(regionInfos.get(4), 0).build())); + } + + @Test + public void testHonorsMergeRequestMaxNumberOfRegionsCountDefault() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_COUNT_KEY, 1); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 3); + final Map regionSizes = createRegionSizesMap(regionInfos, 0, 0, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + assertEquals(50, normalizer.getMergeRequestMaxNumberOfRegionsCount()); + List plans = normalizer.computePlansForTable(tableDescriptor); + assertThat(plans, contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 0).addTarget(regionInfos.get(2), 0).build())); + } + @Test public void testMergeEmptyRegions0() { conf.setBoolean(SPLIT_ENABLED_KEY, false); From 20c4136bf6b842e8c5fc5010cce9fb76364c3d11 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 21 Sep 2023 12:06:39 +0200 Subject: [PATCH 085/514] HBASE-28065 Corrupt HFile data is mishandled in several cases * when no block size is provided and there's not a preread headerBuf, treat the value with caution. * verify HBase checksums before making use of the block header. * inline verifyOnDiskSizeMatchesHeader to keep throw/return logic in the method body. * separate validation of onDiskSizeWithHeader as input parameter from as read from block header * simplify branching around fetching and populating onDiskSizeWithHeader. * inline retrieving nextOnDiskBlockSize ; add basic validation. * whenever a read is determined to be corrupt and fallback to HDFS checksum is necessary, also invalidate the cached value of headerBuf. * build out a test suite covering various forms of block header corruption, for blocks in first and second positions. 
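Several of the bullets above amount to refusing to trust an on-disk block size that has not been cross-checked yet: a caller-provided value must fit in an int and either be the -1 sentinel or at least cover the block header, and a value parsed from a not-yet-verified header gets the same bounds check. A minimal sketch of that validation (hypothetical names, not the exact HFileBlock internals):

// Sketch: sanity-check an on-disk-size-with-header value before using it to size a read.
// -1 is the sentinel for "size unknown, read it from the block header instead".
static boolean isSaneOnDiskSizeWithHeader(long value, int headerSize) {
  int intValue = (int) value;
  if (intValue != value) {
    return false; // would overflow an int, so it cannot be a valid block size
  }
  if (intValue == -1) {
    return true; // caller does not know the size yet
  }
  // a real block is never smaller than its own header
  return intValue >= headerSize;
}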
Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/io/hfile/HFileBlock.java | 154 +++-- .../hadoop/hbase/io/hfile/TestChecksum.java | 2 +- .../hadoop/hbase/io/hfile/TestHFile.java | 7 +- .../hfile/TestHFileBlockHeaderCorruption.java | 529 ++++++++++++++++++ 4 files changed, 640 insertions(+), 52 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index b4bb2fb2c900..a3ead34730fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -392,12 +392,12 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final /** * Parse total on disk size including header and checksum. - * @param headerBuf Header ByteBuffer. Presumed exact size of header. - * @param verifyChecksum true if checksum verification is in use. + * @param headerBuf Header ByteBuffer. Presumed exact size of header. + * @param checksumSupport true if checksum verification is in use. * @return Size of the block with header included. */ - private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean verifyChecksum) { - return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum); + private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean checksumSupport) { + return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(checksumSupport); } /** @@ -1597,33 +1597,48 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean } /** - * Returns Check onDiskSizeWithHeaderL size is healthy and then return it as an int + * Check that {@code value} read from a block header seems reasonable, within a large margin of + * error. + * @return {@code true} if the value is safe to proceed, {@code false} otherwise. */ - private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final int hdrSize) - throws IOException { - if ( - (onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) - || onDiskSizeWithHeaderL >= Integer.MAX_VALUE - ) { - throw new IOException( - "Invalid onDisksize=" + onDiskSizeWithHeaderL + ": expected to be at least " + hdrSize - + " and at most " + Integer.MAX_VALUE + ", or -1"); + private boolean checkOnDiskSizeWithHeader(int value) { + if (value < 0) { + if (LOG.isTraceEnabled()) { + LOG.trace( + "onDiskSizeWithHeader={}; value represents a size, so it should never be negative.", + value); + } + return false; + } + if (value - hdrSize < 0) { + if (LOG.isTraceEnabled()) { + LOG.trace("onDiskSizeWithHeader={}, hdrSize={}; don't accept a value that is negative" + + " after the header size is excluded.", value, hdrSize); + } + return false; } - return (int) onDiskSizeWithHeaderL; + return true; } /** - * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something is - * not right. + * Check that {@code value} provided by the calling context seems reasonable, within a large + * margin of error. + * @return {@code true} if the value is safe to proceed, {@code false} otherwise. 
*/ - private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf, - final long offset, boolean verifyChecksum) throws IOException { - // Assert size provided aligns with what is in the header - int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum); - if (passedIn != fromHeader) { - throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader - + ", offset=" + offset + ", fileContext=" + this.fileContext); + private boolean checkCallerProvidedOnDiskSizeWithHeader(long value) { + // same validation logic as is used by Math.toIntExact(long) + int intValue = (int) value; + if (intValue != value) { + if (LOG.isTraceEnabled()) { + LOG.trace("onDiskSizeWithHeaderL={}; value exceeds int size limits.", value); + } + return false; + } + if (intValue == -1) { + // a magic value we expect to see. + return true; } + return checkOnDiskSizeWithHeader(intValue); } /** @@ -1654,14 +1669,16 @@ private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock, this.prefetchedHeader.set(ph); } - private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock, - int onDiskSizeWithHeader) { - int nextBlockOnDiskSize = -1; - if (readNextHeader) { - nextBlockOnDiskSize = - onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + hdrSize; - } - return nextBlockOnDiskSize; + /** + * Clear the cached value when its integrity is suspect. + */ + private void invalidateNextBlockHeader() { + prefetchedHeader.set(null); + } + + private int getNextBlockOnDiskSize(ByteBuff onDiskBlock, int onDiskSizeWithHeader) { + return onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + + hdrSize; } private ByteBuff allocate(int size, boolean intoHeap) { @@ -1687,17 +1704,21 @@ private ByteBuff allocate(int size, boolean intoHeap) { protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, boolean intoHeap) throws IOException { + final Span span = Span.current(); + final AttributesBuilder attributesBuilder = Attributes.builder(); + Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY)) + .ifPresent(c -> c.accept(attributesBuilder)); if (offset < 0) { throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); } + if (!checkCallerProvidedOnDiskSizeWithHeader(onDiskSizeWithHeaderL)) { + LOG.trace("Caller provided invalid onDiskSizeWithHeaderL={}", onDiskSizeWithHeaderL); + onDiskSizeWithHeaderL = -1; + } + int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL; - final Span span = Span.current(); - final AttributesBuilder attributesBuilder = Attributes.builder(); - Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY)) - .ifPresent(c -> c.accept(attributesBuilder)); - int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize); - // Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1 + // Try to use the cached header. Will serve us in rare case where onDiskSizeWithHeaderL==-1 // and will save us having to seek the stream backwards to reread the header we // read the last time through here. ByteBuff headerBuf = getCachedHeader(offset); @@ -1711,8 +1732,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, // file has support for checksums (version 2+). 
boolean checksumSupport = this.fileContext.isUseHBaseChecksum(); long startTime = EnvironmentEdgeManager.currentTime(); - if (onDiskSizeWithHeader <= 0) { - // We were not passed the block size. Need to get it from the header. If header was + if (onDiskSizeWithHeader == -1) { + // The caller does not know the block size. Need to get it from the header. If header was // not cached (see getCachedHeader above), need to seek to pull it in. This is costly // and should happen very rarely. Currently happens on open of a hfile reader where we // read the trailer blocks to pull in the indices. Otherwise, we are reading block sizes @@ -1729,6 +1750,19 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, } onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); } + + // The common case is that onDiskSizeWithHeader was produced by a read without checksum + // validation, so give it a sanity check before trying to use it. + if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) { + if (verifyChecksum) { + invalidateNextBlockHeader(); + span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); + return null; + } else { + throw new IOException("Invalid onDiskSizeWithHeader=" + onDiskSizeWithHeader); + } + } + int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; // Allocate enough space to fit the next block's header too; saves a seek next time through. // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header; @@ -1745,19 +1779,49 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, boolean readNextHeader = readAtOffset(is, onDiskBlock, onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread); onDiskBlock.rewind(); // in case of moving position when copying a cached header - int nextBlockOnDiskSize = - getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader); + + // the call to validateChecksum for this block excludes the next block header over-read, so + // no reason to delay extracting this value. + int nextBlockOnDiskSize = -1; + if (readNextHeader) { + int parsedVal = getNextBlockOnDiskSize(onDiskBlock, onDiskSizeWithHeader); + if (checkOnDiskSizeWithHeader(parsedVal)) { + nextBlockOnDiskSize = parsedVal; + } + } if (headerBuf == null) { headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize); } - // Do a few checks before we go instantiate HFileBlock. - assert onDiskSizeWithHeader > this.hdrSize; - verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport); + ByteBuff curBlock = onDiskBlock.duplicate().position(0).limit(onDiskSizeWithHeader); // Verify checksum of the data before using it for building HFileBlock. if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) { + invalidateNextBlockHeader(); + span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); return null; } + + // TODO: is this check necessary or can we proceed with a provided value regardless of + // what is in the header? + int fromHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); + if (onDiskSizeWithHeader != fromHeader) { + if (LOG.isTraceEnabled()) { + LOG.trace("Passed in onDiskSizeWithHeader={} != {}, offset={}, fileContext={}", + onDiskSizeWithHeader, fromHeader, offset, this.fileContext); + } + if (checksumSupport && verifyChecksum) { + // This file supports HBase checksums and verification of those checksums was + // requested. 
The block size provided by the caller (presumably from the block index) + // does not match the block size written to the block header. treat this as + // HBase-checksum failure. + span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); + invalidateNextBlockHeader(); + return null; + } + throw new IOException("Passed in onDiskSizeWithHeader=" + onDiskSizeWithHeader + " != " + + fromHeader + ", offset=" + offset + ", fileContext=" + this.fileContext); + } + // remove checksum from buffer now that it's verified int sizeWithoutChecksum = curBlock.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); curBlock.limit(sizeWithoutChecksum); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java index fdd31fc4cf20..707a8b84c620 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java @@ -61,7 +61,7 @@ public class TestChecksum { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestChecksum.class); - private static final Logger LOG = LoggerFactory.getLogger(TestHFileBlock.class); + private static final Logger LOG = LoggerFactory.getLogger(TestChecksum.class); static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = { NONE, GZ }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index e33708022203..7624e2197914 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -163,12 +163,7 @@ public void testReaderWithoutBlockCache() throws Exception { fillByteBuffAllocator(alloc, bufCount); // start write to store file. Path path = writeStoreFile(); - try { - readStoreFile(path, conf, alloc); - } catch (Exception e) { - // fail test - assertTrue(false); - } + readStoreFile(path, conf, alloc); Assert.assertEquals(bufCount, alloc.getFreeBufferCount()); alloc.clean(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java new file mode 100644 index 000000000000..f74833a3b5eb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java @@ -0,0 +1,529 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Random; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilder; +import org.apache.hadoop.hbase.CellBuilderFactory; +import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.ExternalResource; +import org.junit.rules.RuleChain; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This test provides coverage for HFileHeader block fields that are read and interpreted before + * HBase checksum validation can be applied. As of now, this is just + * {@code onDiskSizeWithoutHeader}. 
+ */ +@Category({ IOTests.class, SmallTests.class }) +public class TestHFileBlockHeaderCorruption { + + private static final Logger LOG = LoggerFactory.getLogger(TestHFileBlockHeaderCorruption.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestHFileBlockHeaderCorruption.class); + + private final HFileTestRule hFileTestRule; + + @Rule + public final RuleChain ruleChain; + + public TestHFileBlockHeaderCorruption() throws IOException { + TestName testName = new TestName(); + hFileTestRule = new HFileTestRule(new HBaseTestingUtil(), testName); + ruleChain = RuleChain.outerRule(testName).around(hFileTestRule); + } + + @Test + public void testOnDiskSizeWithoutHeaderCorruptionFirstBlock() throws Exception { + HFileBlockChannelPosition firstBlock = null; + try { + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + assertTrue(it.hasNext()); + firstBlock = it.next(); + } + + Corrupter c = new Corrupter(firstBlock); + + logHeader(firstBlock); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(Integer.MIN_VALUE))); + logHeader(firstBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IOException.class) + .withMessage(startsWith("Invalid onDiskSizeWithHeader="))); + } + assertEquals(0, consumer.getItemsRead()); + } + + c.restore(); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(0))); + logHeader(firstBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IllegalArgumentException.class)); + } + assertEquals(0, consumer.getItemsRead()); + } + + c.restore(); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(Integer.MAX_VALUE))); + logHeader(firstBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IOException.class) + .withMessage(startsWith("Invalid onDiskSizeWithHeader="))); + } + assertEquals(0, consumer.getItemsRead()); + } + } finally { + if (firstBlock != null) { + firstBlock.close(); + } + } + } + + @Test + public void testOnDiskSizeWithoutHeaderCorruptionSecondBlock() throws Exception { + HFileBlockChannelPosition secondBlock = null; + try { + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + assertTrue(it.hasNext()); + it.next(); + assertTrue(it.hasNext()); + secondBlock = it.next(); + } + + Corrupter c = new Corrupter(secondBlock); + + logHeader(secondBlock); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(Integer.MIN_VALUE))); + logHeader(secondBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + 
fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IOException.class) + .withMessage(startsWith("Invalid onDiskSizeWithHeader="))); + } + assertEquals(1, consumer.getItemsRead()); + } + + c.restore(); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(0))); + logHeader(secondBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IllegalArgumentException.class)); + } + assertEquals(1, consumer.getItemsRead()); + } + + c.restore(); + c.write(HFileBlock.Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX, + ByteBuffer.wrap(Bytes.toBytes(Integer.MAX_VALUE))); + logHeader(secondBlock); + try (HFileBlockChannelPositionIterator it = + new HFileBlockChannelPositionIterator(hFileTestRule)) { + CountingConsumer consumer = new CountingConsumer(it); + try { + consumer.readFully(); + fail(); + } catch (Exception e) { + assertThat(e, new IsThrowableMatching().withInstanceOf(IOException.class) + .withMessage(startsWith("Invalid onDiskSizeWithHeader="))); + } + assertEquals(1, consumer.getItemsRead()); + } + } finally { + if (secondBlock != null) { + secondBlock.close(); + } + } + } + + private static void logHeader(HFileBlockChannelPosition hbcp) throws IOException { + ByteBuff buf = ByteBuff.wrap(ByteBuffer.allocate(HFileBlock.headerSize(true))); + hbcp.rewind(); + assertEquals(buf.capacity(), buf.read(hbcp.getChannel())); + buf.rewind(); + hbcp.rewind(); + logHeader(buf); + } + + private static void logHeader(ByteBuff buf) { + byte[] blockMagic = new byte[8]; + buf.get(blockMagic); + int onDiskSizeWithoutHeader = buf.getInt(); + int uncompressedSizeWithoutHeader = buf.getInt(); + long prevBlockOffset = buf.getLong(); + byte checksumType = buf.get(); + int bytesPerChecksum = buf.getInt(); + int onDiskDataSizeWithHeader = buf.getInt(); + LOG.debug( + "blockMagic={}, onDiskSizeWithoutHeader={}, uncompressedSizeWithoutHeader={}, " + + "prevBlockOffset={}, checksumType={}, bytesPerChecksum={}, onDiskDataSizeWithHeader={}", + Bytes.toStringBinary(blockMagic), onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, + prevBlockOffset, checksumType, bytesPerChecksum, onDiskDataSizeWithHeader); + } + + /** + * Data class to enabled messing with the bytes behind an {@link HFileBlock}. + */ + public static class HFileBlockChannelPosition implements Closeable { + private final SeekableByteChannel channel; + private final long position; + + public HFileBlockChannelPosition(SeekableByteChannel channel, long position) { + this.channel = channel; + this.position = position; + } + + public SeekableByteChannel getChannel() { + return channel; + } + + public long getPosition() { + return position; + } + + public void rewind() throws IOException { + channel.position(position); + } + + @Override + public void close() throws IOException { + channel.close(); + } + } + + /** + * Reads blocks off of an {@link HFileBlockChannelPositionIterator}, counting them as it does. 
+ */ + public static class CountingConsumer { + private final HFileBlockChannelPositionIterator iterator; + private int itemsRead = 0; + + public CountingConsumer(HFileBlockChannelPositionIterator iterator) { + this.iterator = iterator; + } + + public int getItemsRead() { + return itemsRead; + } + + public Object readFully() throws IOException { + Object val = null; + for (itemsRead = 0; iterator.hasNext(); itemsRead++) { + val = iterator.next(); + } + return val; + } + } + + /** + * A simplified wrapper over an {@link HFileBlock.BlockIterator} that looks a lot like an + * {@link java.util.Iterator}. + */ + public static class HFileBlockChannelPositionIterator implements Closeable { + + private final HFileTestRule hFileTestRule; + private final HFile.Reader reader; + private final HFileBlock.BlockIterator iter; + private HFileBlockChannelPosition current = null; + + public HFileBlockChannelPositionIterator(HFileTestRule hFileTestRule) throws IOException { + Configuration conf = hFileTestRule.getConfiguration(); + HFileSystem hfs = hFileTestRule.getHFileSystem(); + Path hfsPath = hFileTestRule.getPath(); + + HFile.Reader reader = null; + HFileBlock.BlockIterator iter = null; + try { + reader = HFile.createReader(hfs, hfsPath, CacheConfig.DISABLED, true, conf); + HFileBlock.FSReader fsreader = reader.getUncachedBlockReader(); + iter = fsreader.blockRange(0, hfs.getFileStatus(hfsPath).getLen()); + } catch (IOException e) { + if (reader != null) { + closeQuietly(reader::close); + } + throw e; + } + + this.hFileTestRule = hFileTestRule; + this.reader = reader; + this.iter = iter; + } + + public boolean hasNext() throws IOException { + HFileBlock next = iter.nextBlock(); + SeekableByteChannel channel = hFileTestRule.getRWChannel(); + if (next != null) { + current = new HFileBlockChannelPosition(channel, next.getOffset()); + } + return next != null; + } + + public HFileBlockChannelPosition next() { + if (current == null) { + throw new NoSuchElementException(); + } + HFileBlockChannelPosition ret = current; + current = null; + return ret; + } + + @Override + public void close() throws IOException { + if (current != null) { + closeQuietly(current::close); + } + closeQuietly(reader::close); + } + + @FunctionalInterface + private interface CloseMethod { + void run() throws IOException; + } + + private static void closeQuietly(CloseMethod closeMethod) { + try { + closeMethod.run(); + } catch (Throwable e) { + LOG.debug("Ignoring thrown exception.", e); + } + } + } + + /** + * Enables writing and rewriting portions of the file backing an {@link HFileBlock}. 
+ */ + public static class Corrupter { + + private final HFileBlockChannelPosition channelAndPosition; + private final ByteBuffer originalHeader; + + public Corrupter(HFileBlockChannelPosition channelAndPosition) throws IOException { + this.channelAndPosition = channelAndPosition; + this.originalHeader = readHeaderData(channelAndPosition); + } + + private static ByteBuffer readHeaderData(HFileBlockChannelPosition channelAndPosition) + throws IOException { + SeekableByteChannel channel = channelAndPosition.getChannel(); + ByteBuffer originalHeader = ByteBuffer.allocate(HFileBlock.headerSize(true)); + channelAndPosition.rewind(); + channel.read(originalHeader); + return originalHeader; + } + + public void write(int offset, ByteBuffer src) throws IOException { + SeekableByteChannel channel = channelAndPosition.getChannel(); + long position = channelAndPosition.getPosition(); + channel.position(position + offset); + channel.write(src); + } + + public void restore() throws IOException { + SeekableByteChannel channel = channelAndPosition.getChannel(); + originalHeader.rewind(); + channelAndPosition.rewind(); + assertEquals(originalHeader.capacity(), channel.write(originalHeader)); + } + } + + public static class HFileTestRule extends ExternalResource { + + private final HBaseTestingUtil testingUtility; + private final HFileSystem hfs; + private final HFileContext context; + private final TestName testName; + private Path path; + + public HFileTestRule(HBaseTestingUtil testingUtility, TestName testName) throws IOException { + this.testingUtility = testingUtility; + this.testName = testName; + this.hfs = (HFileSystem) HFileSystem.get(testingUtility.getConfiguration()); + this.context = + new HFileContextBuilder().withBlockSize(4 * 1024).withHBaseCheckSum(true).build(); + } + + public Configuration getConfiguration() { + return testingUtility.getConfiguration(); + } + + public HFileSystem getHFileSystem() { + return hfs; + } + + public HFileContext getHFileContext() { + return context; + } + + public Path getPath() { + return path; + } + + public SeekableByteChannel getRWChannel() throws IOException { + java.nio.file.Path p = FileSystems.getDefault().getPath(path.toString()); + return Files.newByteChannel(p, StandardOpenOption.READ, StandardOpenOption.WRITE, + StandardOpenOption.DSYNC); + } + + @Override + protected void before() throws Throwable { + this.path = new Path(testingUtility.getDataTestDirOnTestFS(), testName.getMethodName()); + HFile.WriterFactory factory = + HFile.getWriterFactory(testingUtility.getConfiguration(), CacheConfig.DISABLED) + .withPath(hfs, path).withFileContext(context); + + CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); + Random rand = new Random(Instant.now().toEpochMilli()); + byte[] family = Bytes.toBytes("f"); + try (HFile.Writer writer = factory.create()) { + for (int i = 0; i < 40; i++) { + byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 100); + byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand); + byte[] value = RandomKeyValueUtil.randomValue(rand); + Cell cell = cellBuilder.setType(Cell.Type.Put).setRow(row).setFamily(family) + .setQualifier(qualifier).setValue(value).build(); + writer.append(cell); + cellBuilder.clear(); + } + } + } + } + + /** + * A Matcher implementation that can make basic assertions over a provided {@link Throwable}. + * Assertion failures include the full stacktrace in their description. 
+ */ + private static final class IsThrowableMatching extends TypeSafeMatcher { + + private final List> requirements = new LinkedList<>(); + + public IsThrowableMatching withInstanceOf(Class type) { + requirements.add(instanceOf(type)); + return this; + } + + public IsThrowableMatching withMessage(Matcher matcher) { + requirements.add(hasProperty("message", matcher)); + return this; + } + + @Override + protected boolean matchesSafely(Throwable throwable) { + return allOf(requirements).matches(throwable); + } + + @Override + protected void describeMismatchSafely(Throwable item, Description mismatchDescription) { + allOf(requirements).describeMismatch(item, mismatchDescription); + // would be nice if `item` could be provided as the cause of the AssertionError instead. + mismatchDescription.appendText(String.format("%nProvided: ")); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + try (PrintStream ps = new PrintStream(baos, false, StandardCharsets.UTF_8.name())) { + item.printStackTrace(ps); + ps.flush(); + } + mismatchDescription.appendText(baos.toString(StandardCharsets.UTF_8.name())); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void describeTo(Description description) { + description.appendDescriptionOf(allOf(requirements)); + } + } +} From 05f5d5b33c7502b2b1fb8b10f557046cd9ea85da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Sep 2023 21:26:45 +0800 Subject: [PATCH 086/514] HBASE-28108 Bump cryptography in /dev-support/git-jira-release-audit (#5427) Bumps [cryptography](https://github.com/pyca/cryptography) from 41.0.3 to 41.0.4. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/41.0.3...41.0.4) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 47e6a96aa77e..e1a2497593e3 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2023.7.22 cffi==1.13.2 chardet==3.0.4 -cryptography==41.0.3 +cryptography==41.0.4 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From 4b76a95e032a0426f34a979dd605913ee8bb8d2c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 24 Sep 2023 16:14:40 +0800 Subject: [PATCH 087/514] HBASE-28101 Addendum do not throw EOFException out directly (#5431) Signed-off-by: Nihal Jain --- .../hbase/ipc/NettyRpcDuplexHandler.java | 65 ++++++++++++------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 34869314bab0..47b0b29a5c6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -126,6 +126,37 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) } } + private void finishCall(ResponseHeader responseHeader, ByteBufInputStream in, Call call) + throws IOException { + Message value; + if (call.responseDefaultType != null) { + Message.Builder builder = call.responseDefaultType.newBuilderForType(); + if (!builder.mergeDelimitedFrom(in)) { + // The javadoc of mergeDelimitedFrom says returning false means the stream reaches EOF + // before reading any bytes out, so here we need to manually finish create the EOFException + // and finish the call + call.setException(new EOFException("EOF while reading response with type: " + + call.responseDefaultType.getClass().getName())); + return; + } + value = builder.build(); + } else { + value = null; + } + CellScanner cellBlockScanner; + if (responseHeader.hasCellBlockMeta()) { + int size = responseHeader.getCellBlockMeta().getLength(); + // Maybe we could read directly from the ByteBuf. + // The problem here is that we do not know when to release it. 
+ byte[] cellBlock = new byte[size]; + in.readFully(cellBlock); + cellBlockScanner = cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); + } else { + cellBlockScanner = null; + } + call.setResponse(value, cellBlockScanner); + } + private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOException { int totalSize = buf.readInt(); ByteBufInputStream in = new ByteBufInputStream(buf); @@ -166,31 +197,17 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep call.setException(remoteExc); return; } - Message value; - if (call.responseDefaultType != null) { - Message.Builder builder = call.responseDefaultType.newBuilderForType(); - if (!builder.mergeDelimitedFrom(in)) { - // The javadoc of mergeDelimitedFrom says returning false means the stream reaches EOF - // before reading any bytes out, so here we need to manually throw the EOFException out - throw new EOFException( - "EOF while reading response with type: " + call.responseDefaultType.getClass().getName()); - } - value = builder.build(); - } else { - value = null; - } - CellScanner cellBlockScanner; - if (responseHeader.hasCellBlockMeta()) { - int size = responseHeader.getCellBlockMeta().getLength(); - // Maybe we could read directly from the ByteBuf. - // The problem here is that we do not know when to release it. - byte[] cellBlock = new byte[size]; - buf.readBytes(cellBlock); - cellBlockScanner = cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); - } else { - cellBlockScanner = null; + try { + finishCall(responseHeader, in, call); + } catch (IOException e) { + // As the call has been removed from id2Call map, if we hit an exception here, the + // exceptionCaught method can not help us finish the call, so here we need to catch the + // exception and finish it + // And in netty, the decoding the frame based, when reaching here we have already read a full + // frame, so hitting exception here does not mean the stream decoding is broken, thus we do + // not need to throw the exception out and close the connection. 
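The comments above describe the addendum's approach: a false return from mergeDelimitedFrom means the stream hit EOF before any bytes were read, so the affected call is failed with an EOFException rather than the whole connection being torn down. In isolation, that EOF contract looks like the sketch below, written against the plain com.google.protobuf API (HBase really uses its relocated hbase-thirdparty copy, but the mergeDelimitedFrom contract is the same) with a hypothetical helper name:

import com.google.protobuf.Message;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class DelimitedReads {

  private DelimitedReads() {
  }

  // Reads one length-delimited message, turning the "returns false at EOF" contract of
  // mergeDelimitedFrom into an explicit EOFException instead of an empty message.
  static Message readDelimitedOrThrow(Message.Builder builder, InputStream in) throws IOException {
    if (!builder.mergeDelimitedFrom(in)) {
      throw new EOFException("stream ended before a length-delimited message could be read");
    }
    return builder.build();
  }
}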
+ call.setException(e); } - call.setResponse(value, cellBlockScanner); } @Override From 79fbbc3ef44fc599eee61c6170342fc1b49bf39b Mon Sep 17 00:00:00 2001 From: Ke Han <38852697+hanke580@users.noreply.github.com> Date: Mon, 25 Sep 2023 13:05:54 -0400 Subject: [PATCH 088/514] HBASE-28105 NPE in QuotaCache if Table is dropped from cluster (#5426) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/quotas/QuotaCache.java | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 56253f7fcbb2..0f7b5e42e68b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -371,19 +371,24 @@ private void updateQuotaFactors() { // Update table machine quota factors for (TableName tableName : tableQuotaCache.keySet()) { - double factor = 1; - try { - long regionSize = tableRegionStatesCount.get(tableName).getOpenRegions(); - if (regionSize == 0) { - factor = 0; - } else { - int localRegionSize = rsServices.getRegions(tableName).size(); - factor = 1.0 * localRegionSize / regionSize; + if (tableRegionStatesCount.containsKey(tableName)) { + double factor = 1; + try { + long regionSize = tableRegionStatesCount.get(tableName).getOpenRegions(); + if (regionSize == 0) { + factor = 0; + } else { + int localRegionSize = rsServices.getRegions(tableName).size(); + factor = 1.0 * localRegionSize / regionSize; + } + } catch (IOException e) { + LOG.warn("Get table regions failed: {}", tableName, e); } - } catch (IOException e) { - LOG.warn("Get table regions failed: {}", tableName, e); + tableMachineQuotaFactors.put(tableName, factor); + } else { + // TableName might have already been dropped (outdated) + tableMachineQuotaFactors.remove(tableName); } - tableMachineQuotaFactors.put(tableName, factor); } } } From 6cb15b14d4e2b9d44e3ade3efaff281090fb4d72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 15:29:36 +0800 Subject: [PATCH 089/514] HBASE-28112 Bump org.xerial.snappy:snappy-java from 1.1.10.1 to 1.1.10.4 (#5436) Bumps [org.xerial.snappy:snappy-java](https://github.com/xerial/snappy-java) from 1.1.10.1 to 1.1.10.4. - [Release notes](https://github.com/xerial/snappy-java/releases) - [Commits](https://github.com/xerial/snappy-java/compare/v1.1.10.1...v1.1.10.4) --- updated-dependencies: - dependency-name: org.xerial.snappy:snappy-java dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e43a27c6afee..56058b8e2b97 100644 --- a/pom.xml +++ b/pom.xml @@ -895,7 +895,7 @@ 0.24 1.11.0 1.8.0 - 1.1.10.1 + 1.1.10.4 1.9 1.5.5-2 4.1.4 From e2a10f61800fe130234f80869a7ab01967cde285 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 27 Sep 2023 13:58:13 -0400 Subject: [PATCH 090/514] HBASE-27784: support quota user overrides (#5424) Signed-off-by: Nick Dimiduk Signed-off-by: Bryan Beaudreault --- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 12 +- .../apache/hadoop/hbase/ipc/ServerCall.java | 13 ++ .../hadoop/hbase/quotas/QuotaCache.java | 36 +++++- .../namequeues/TestNamedQueueRecorder.java | 5 + .../region/TestRegionProcedureStore.java | 5 + .../hbase/quotas/TestQuotaUserOverride.java | 116 ++++++++++++++++++ 6 files changed, 185 insertions(+), 2 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 0555202f88b9..260d6e1a9803 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -92,11 +92,21 @@ public interface RpcCall extends RpcCallContext { Map getConnectionAttributes(); /** - * Returns the map of attributes specified when building the request. + * Returns the map of attributes specified when building the request. This map is lazily evaluated + * so if you only need a single attribute then it may be cheaper to use + * {@link #getRequestAttribute(String)} * @see org.apache.hadoop.hbase.client.TableBuilder#setRequestAttribute(String, byte[]) */ Map getRequestAttributes(); + /** + * Returns a single request attribute value, or null if no value is present. 
If you need many + * request attributes then you should fetch the lazily evaluated map via + * {@link #getRequestAttributes()} + * @see org.apache.hadoop.hbase.client.TableBuilder#setRequestAttribute(String, byte[]) + */ + byte[] getRequestAttribute(String key); + /** Returns Port of remote address in this call */ int getRemotePort(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 66a2e44fac19..ed688977b963 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -234,6 +234,19 @@ public Map getRequestAttributes() { return this.requestAttributes; } + @Override + public byte[] getRequestAttribute(String key) { + if (this.requestAttributes == null) { + for (HBaseProtos.NameBytesPair nameBytesPair : header.getAttributeList()) { + if (nameBytesPair.getName().equals(key)) { + return nameBytesPair.getValue().toByteArray(); + } + } + return null; + } + return this.requestAttributes.get(key); + } + @Override public int getPriority() { return this.header.getPriority(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 0f7b5e42e68b..0a57b9fd8f8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -24,6 +24,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -35,8 +36,11 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.RegionStatesCount; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; @@ -57,6 +61,11 @@ public class QuotaCache implements Stoppable { private static final Logger LOG = LoggerFactory.getLogger(QuotaCache.class); public static final String REFRESH_CONF_KEY = "hbase.quota.refresh.period"; + + // defines the request attribute key which, when provided, will override the request's username + // from the perspective of user quotas + public static final String QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY = + "hbase.quota.user.override.key"; private static final int REFRESH_DEFAULT_PERIOD = 5 * 60000; // 5min private static final int EVICT_PERIOD_FACTOR = 5; // N * REFRESH_DEFAULT_PERIOD @@ -74,12 +83,15 @@ public class QuotaCache implements Stoppable { private final ConcurrentHashMap tableMachineQuotaFactors = new ConcurrentHashMap<>(); private final RegionServerServices rsServices; + private final String userOverrideRequestAttributeKey; private QuotaRefresherChore refreshChore; private boolean stopped = true; public QuotaCache(final RegionServerServices rsServices) { this.rsServices = rsServices; + this.userOverrideRequestAttributeKey = + rsServices.getConfiguration().get(QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY); } public void 
start() throws IOException { @@ -125,7 +137,7 @@ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableNa * @return the quota info associated to specified user */ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { - return computeIfAbsent(userQuotaCache, ugi.getShortUserName(), UserQuotaState::new, + return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), UserQuotaState::new, this::triggerCacheRefresh); } @@ -160,6 +172,28 @@ protected boolean isExceedThrottleQuotaEnabled() { return exceedThrottleQuotaEnabled; } + /** + * Applies a request attribute user override if available, otherwise returns the UGI's short + * username + * @param ugi The request's UserGroupInformation + */ + private String getQuotaUserName(final UserGroupInformation ugi) { + if (userOverrideRequestAttributeKey == null) { + return ugi.getShortUserName(); + } + + Optional rpcCall = RpcServer.getCurrentCall(); + if (!rpcCall.isPresent()) { + return ugi.getShortUserName(); + } + + byte[] override = rpcCall.get().getRequestAttribute(userOverrideRequestAttributeKey); + if (override == null) { + return ugi.getShortUserName(); + } + return Bytes.toString(override); + } + /** * Returns the QuotaState requested. If the quota info is not in cache an empty one will be * returned and the quota request will be enqueued for the next cache refresh. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index af6c51260fd5..35a1757115c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -771,6 +771,11 @@ public Map getRequestAttributes() { pair -> pair.getValue().toByteArray())); } + @Override + public byte[] getRequestAttribute(String key) { + return null; + } + @Override public int getRemotePort() { return 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index 83f788ba1518..305f0e29e952 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -232,6 +232,11 @@ public Map getRequestAttributes() { return null; } + @Override + public byte[] getRequestAttribute(String key) { + return null; + } + @Override public int getRemotePort() { return 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java new file mode 100644 index 000000000000..75b3cc3ca84a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestQuotaUserOverride { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestQuotaUserOverride.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final byte[] FAMILY = Bytes.toBytes("cf"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); + private static final int NUM_SERVERS = 1; + private static final String CUSTOM_OVERRIDE_KEY = "foo"; + + private static final TableName TABLE_NAME = TableName.valueOf("TestQuotaUserOverride"); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, 1_000); + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 1); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 500); + TEST_UTIL.getConfiguration().set(QuotaCache.QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY, + CUSTOM_OVERRIDE_KEY); + TEST_UTIL.startMiniCluster(NUM_SERVERS); + TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + QuotaCache.TEST_FORCE_REFRESH = true; + TEST_UTIL.createTable(TABLE_NAME, FAMILY); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + EnvironmentEdgeManager.reset(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testUserGlobalThrottleWithCustomOverride() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final String userOverrideWithQuota = User.getCurrent().getShortName() + "123"; + + // Add 6req/min limit + admin.setQuota(QuotaSettingsFactory.throttleUser(userOverrideWithQuota, + ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES)); + + Table tableWithThrottle = TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null) + .setRequestAttribute(CUSTOM_OVERRIDE_KEY, Bytes.toBytes(userOverrideWithQuota)).build(); + Table tableWithoutThrottle = 
TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null) + .setRequestAttribute(QuotaCache.QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY, + Bytes.toBytes(userOverrideWithQuota)) + .build(); + Table tableWithoutThrottle2 = + TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null).build(); + + // warm things up + doPuts(10, FAMILY, QUALIFIER, tableWithThrottle); + doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle); + doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle2); + + // should reject some requests + assertTrue(10 > doPuts(10, FAMILY, QUALIFIER, tableWithThrottle)); + // should accept all puts + assertEquals(10, doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle)); + // should accept all puts + assertEquals(10, doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle2)); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userOverrideWithQuota)); + Thread.sleep(60_000); + assertEquals(10, doPuts(10, FAMILY, QUALIFIER, tableWithThrottle)); + assertEquals(10, doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle)); + assertEquals(10, doPuts(10, FAMILY, QUALIFIER, tableWithoutThrottle2)); + } + +} From ea0a356330e1e6a2bcd0a3e2b86ccb9075f4fa9e Mon Sep 17 00:00:00 2001 From: Xiaolin Ha Date: Thu, 28 Sep 2023 08:58:18 +0800 Subject: [PATCH 091/514] HBASE-28047 Deadlock when opening mob files (#5374) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/mob/MobFileCache.java | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java index ed1803cb38d7..b353b53ffb71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java @@ -38,6 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.hash.Hashing; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -174,7 +175,7 @@ public void evictFile(String fileName) { IdLock.Entry lockEntry = null; try { // obtains the lock to close the cached file. - lockEntry = keyLock.getLockEntry(fileName.hashCode()); + lockEntry = keyLock.getLockEntry(hashFileName(fileName)); CachedMobFile evictedFile = map.remove(fileName); if (evictedFile != null) { evictedFile.close(); @@ -205,7 +206,7 @@ public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws } else { String fileName = path.getName(); CachedMobFile cached = map.get(fileName); - IdLock.Entry lockEntry = keyLock.getLockEntry(fileName.hashCode()); + IdLock.Entry lockEntry = keyLock.getLockEntry(hashFileName(fileName)); try { if (cached == null) { cached = map.get(fileName); @@ -238,7 +239,7 @@ public void closeFile(MobFile file) { if (!isCacheEnabled) { file.close(); } else { - lockEntry = keyLock.getLockEntry(file.getFileName().hashCode()); + lockEntry = keyLock.getLockEntry(hashFileName(file.getFileName())); file.close(); } } catch (IOException e) { @@ -325,4 +326,13 @@ public void printStatistics() { lastEvictedFileCount += evicted; } + /** + * Use murmurhash to reduce the conflicts of hashed file names. We should notice that the hash + * conflicts may bring deadlocks, when opening mob files with evicting some other files, as + * described in HBASE-28047. 
+ */ + private long hashFileName(String fileName) { + return Hashing.murmur3_128().hashString(fileName, java.nio.charset.StandardCharsets.UTF_8) + .asLong(); + } } From 4bc7d4795bfa26fccda0d77babb7906ddf50d1a9 Mon Sep 17 00:00:00 2001 From: Rahul Kumar Date: Thu, 28 Sep 2023 13:05:11 +0530 Subject: [PATCH 092/514] HBASE-28068 addendum Co-authored-by: Rahul Kumar --- hbase-common/src/main/resources/hbase-default.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 61eb4a0059fa..17a9853d2ad3 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -658,7 +658,7 @@ possible configurations would overwhelm and obscure the important. hbase.normalizer.merge.merge_request_max_number_of_regions - 50 + 100 The maximum number of region count in a merge request for merge normalization. From 78e6a51758cc46d5e42b5e56f7be40c68d4b3a1b Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 28 Sep 2023 21:15:50 -0800 Subject: [PATCH 093/514] HBASE-28050 RSProcedureDispatcher to fail-fast for krb auth failures (#5391) Signed-off-by: Duo Zhang Signed-off-by: Andrew Purtell Signed-off-by: Aman Poonia Signed-off-by: David Manning --- .../hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- .../hbase/ipc/RpcConnectionConstants.java | 34 +++++++++++ .../procedure/RSProcedureDispatcher.java | 59 ++++++++++++++++--- 3 files changed, 85 insertions(+), 10 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnectionConstants.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 3f9a58d51263..408ea347e7a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -347,7 +347,7 @@ public void operationComplete(ChannelFuture future) throws Exception { private void sendRequest0(Call call, HBaseRpcController hrc) throws IOException { assert eventLoop.inEventLoop(); if (reloginInProgress) { - throw new IOException("Can not send request because relogin is in progress."); + throw new IOException(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS); } hrc.notifyOnCancel(new RpcCallback() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnectionConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnectionConstants.java new file mode 100644 index 000000000000..2b9853033393 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnectionConstants.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Constants to be used by RPC connection based utilities. + */ +@InterfaceAudience.Private +public final class RpcConnectionConstants { + + private RpcConnectionConstants() { + } + + public static final String RELOGIN_IS_IN_PROGRESS = + "Can not send request because relogin is in progress."; + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index af22fba27290..abc9c575a62e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -22,11 +22,13 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import javax.security.sasl.SaslException; import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.ipc.RpcConnectionConstants; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.ServerListener; @@ -287,17 +289,15 @@ private boolean scheduleForRetry(IOException e) { numberOfAttemptsSoFar); return false; } - // This exception is thrown in the rpc framework, where we can make sure that the call has not - // been executed yet, so it is safe to mark it as fail. Especially for open a region, we'd - // better choose another region server. + // This category of exceptions is thrown in the rpc framework, where we can make sure + // that the call has not been executed yet, so it is safe to mark it as fail. + // Especially for open a region, we'd better choose another region server. // Notice that, it is safe to quit only if this is the first time we send request to region // server. Maybe the region server has accepted our request the first time, and then there is - // a network error which prevents we receive the response, and the second time we hit a - // CallQueueTooBigException, obviously it is not safe to quit here, otherwise it may lead to a - // double assign... - if (e instanceof CallQueueTooBigException && numberOfAttemptsSoFar == 0) { - LOG.warn("request to {} failed due to {}, try={}, this usually because" - + " server is overloaded, give up", serverName, e.toString(), numberOfAttemptsSoFar); + // a network error which prevents we receive the response, and the second time we hit + // this category of exceptions, obviously it is not safe to quit here, otherwise it may lead + // to a double assign... + if (numberOfAttemptsSoFar == 0 && unableToConnectToServer(e)) { return false; } // Always retry for other exception types if the region server is not dead yet. @@ -330,6 +330,47 @@ private boolean scheduleForRetry(IOException e) { return true; } + /** + * The category of exceptions where we can ensure that the request has not yet been received + * and/or processed by the target regionserver yet and hence we can determine whether it is safe + * to choose different regionserver as the target. 
+ * @param e IOException thrown by the underlying rpc framework. + * @return true if the exception belongs to the category where the regionserver has not yet + * received the request yet. + */ + private boolean unableToConnectToServer(IOException e) { + if (e instanceof CallQueueTooBigException) { + LOG.warn("request to {} failed due to {}, try={}, this usually because" + + " server is overloaded, give up", serverName, e, numberOfAttemptsSoFar); + return true; + } + if (isSaslError(e)) { + LOG.warn("{} is not reachable; give up after first attempt", serverName, e); + return true; + } + return false; + } + + private boolean isSaslError(IOException e) { + Throwable cause = e; + while (true) { + if (cause instanceof IOException) { + IOException unwrappedCause = unwrapException((IOException) cause); + if ( + unwrappedCause instanceof SaslException + || (unwrappedCause.getMessage() != null && unwrappedCause.getMessage() + .contains(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS)) + ) { + return true; + } + } + cause = cause.getCause(); + if (cause == null) { + return false; + } + } + } + private long getMaxWaitTime() { if (this.maxWaitTime < 0) { // This is the max attempts, not retries, so it should be at least 1. From ff0f0e431869bbb9fe8a0e3aec87faa4718a2bd6 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 30 Sep 2023 16:23:08 +0800 Subject: [PATCH 094/514] HBASE-27970 Make sync replication also work with FSHLog (#5430) Signed-off-by: chenglei --- .../hbase/util/IOExceptionConsumer.java | 28 ++ .../hbase/util/IOExceptionRunnable.java | 28 ++ .../hbase/master/region/MasterRegion.java | 2 +- ...nsitPeerSyncReplicationStateProcedure.java | 1 - .../hbase/regionserver/HRegionServer.java | 4 +- .../hbase/regionserver/SplitLogWorker.java | 4 +- .../hbase/regionserver/StoreEngine.java | 6 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 138 +++++++- .../hbase/regionserver/wal/AsyncFSWAL.java | 89 +---- .../regionserver/wal/CombinedAsyncWriter.java | 41 +-- .../regionserver/wal/CombinedWriter.java | 94 +++++ .../regionserver/wal/CombinedWriterBase.java | 73 ++++ .../regionserver/wal/DualAsyncFSWAL.java | 119 ------- .../hadoop/hbase/regionserver/wal/FSHLog.java | 16 +- .../regionserver/PeerActionListener.java | 5 +- .../replication/regionserver/Replication.java | 42 ++- .../ReplicationSourceManager.java | 4 +- .../regionserver/ReplicationSyncUp.java | 11 +- .../hbase/wal/AbstractFSWALProvider.java | 51 +-- ...Provider.java => AbstractWALProvider.java} | 335 ++++++++---------- .../hadoop/hbase/wal/AsyncFSWALProvider.java | 16 +- .../hadoop/hbase/wal/FSHLogProvider.java | 13 +- .../hbase/wal/RegionGroupingProvider.java | 125 ++++--- .../java/org/apache/hadoop/hbase/wal/WAL.java | 14 + .../apache/hadoop/hbase/wal/WALFactory.java | 38 +- .../apache/hadoop/hbase/wal/WALProvider.java | 10 + .../regionserver/TestFailedAppendAndSync.java | 4 +- .../hbase/regionserver/TestHRegion.java | 4 +- ...estRegionReplicationForWriteException.java | 12 +- .../regionserver/wal/AbstractTestFSWAL.java | 4 +- .../regionserver/wal/TestAsyncFSWAL.java | 20 +- .../wal/TestAsyncFSWALDurability.java | 10 +- .../wal/TestAsyncFSWALRollStuck.java | 6 +- .../regionserver/wal/TestAsyncWALReplay.java | 6 +- .../wal/TestFSHLogDurability.java | 4 +- .../wal/TestWALSyncTimeoutException.java | 12 +- .../BrokenRemoteAsyncFSWALProvider.java | 190 ++++++++++ .../replication/DualAsyncFSWALForTest.java | 154 -------- ...ava => SyncReplicationActiveTestBase.java} | 8 +- ...va => SyncReplicationStandbyTestBase.java} | 29 +- 
.../TestSyncReplicationActiveAsyncFSWAL.java | 41 +++ .../TestSyncReplicationActiveFSHLog.java | 41 +++ ...eplicationMoreLogsInLocalCopyToRemote.java | 31 +- ...icationMoreLogsInLocalGiveUpSplitting.java | 44 ++- .../TestSyncReplicationStandbyAsyncFSWAL.java | 41 +++ .../TestSyncReplicationStandbyFSHLog.java | 41 +++ .../TestReplicationSourceManager.java | 2 +- .../TestSyncReplicationShipperQuit.java | 6 +- .../hadoop/hbase/wal/IOTestProvider.java | 2 +- ...SWALCorruptionDueToDanglingByteBuffer.java | 6 +- .../wal/TestSyncReplicationWALProvider.java | 173 --------- .../hadoop/hbase/wal/TestWALFactory.java | 22 +- 52 files changed, 1170 insertions(+), 1050 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionConsumer.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionRunnable.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriter.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriterBase.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java rename hbase-server/src/main/java/org/apache/hadoop/hbase/wal/{SyncReplicationWALProvider.java => AbstractWALProvider.java} (50%) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/BrokenRemoteAsyncFSWALProvider.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/replication/{TestSyncReplicationActive.java => SyncReplicationActiveTestBase.java} (95%) rename hbase-server/src/test/java/org/apache/hadoop/hbase/replication/{TestSyncReplicationStandBy.java => SyncReplicationStandbyTestBase.java} (84%) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveAsyncFSWAL.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveFSHLog.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyAsyncFSWAL.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyFSHLog.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionConsumer.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionConsumer.java new file mode 100644 index 000000000000..0c494baefb39 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionConsumer.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +@FunctionalInterface +public interface IOExceptionConsumer { + + void accept(T t) throws IOException; +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionRunnable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionRunnable.java new file mode 100644 index 000000000000..1658eafcc72a --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionRunnable.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +@FunctionalInterface +public interface IOExceptionRunnable { + + void run() throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index e45b6271f7b9..05ae6547f310 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -380,7 +380,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException params.archivedWalSuffix(), params.rollPeriodMs(), params.flushSize()); walRoller.start(); - WALFactory walFactory = new WALFactory(conf, server.getServerName(), server, false); + WALFactory walFactory = new WALFactory(conf, server.getServerName(), server); Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName()); Path initializingFlag = new Path(tableDir, INITIALIZING_FLAG); Path initializedFlag = new Path(tableDir, INITIALIZED_FLAG); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java index df6078d64bed..79e23dd0226a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; -import 
org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationUtils; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 07d2ac332c5f..110a9f7fe562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1734,7 +1734,7 @@ public boolean isOnline() { * be hooked up to WAL. */ private void setupWALAndReplication() throws IOException { - WALFactory factory = new WALFactory(conf, serverName, this, true); + WALFactory factory = new WALFactory(conf, serverName, this); // TODO Replication make assumptions here based on the default filesystem impl Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString()); @@ -2122,7 +2122,7 @@ public LogRoller getWalRoller() { return walRoller; } - WALFactory getWalFactory() { + public WALFactory getWalFactory() { return walFactory; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index c5ef49be203f..11cfd8fef476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.AbstractWALProvider; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.yetus.audience.InterfaceAudience; @@ -97,7 +97,7 @@ private static boolean processSyncReplicationWAL(String name, Configuration conf Path walFile = new Path(walDir, name); String filename = walFile.getName(); Optional optSyncPeerId = - SyncReplicationWALProvider.getSyncReplicationPeerIdFromWALName(filename); + AbstractWALProvider.getSyncReplicationPeerIdFromWALName(filename); if (!optSyncPeerId.isPresent()) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index f6e3db0116bb..34f882516bae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.Compactor; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; +import org.apache.hadoop.hbase.util.IOExceptionRunnable; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -458,11 +459,6 @@ public List commitStoreFiles(List files, boolean validate) thr return committedFiles; } - @FunctionalInterface - public interface IOExceptionRunnable 
{ - void run() throws IOException; - } - /** * Add the store files to store file manager, and also record it in the store file tracker. *

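A minimal usage sketch for the IOExceptionRunnable and IOExceptionConsumer functional interfaces introduced above (and promoted out of StoreEngine for reuse). The class and helper names below are illustrative only and not part of the patch; the sketch just shows why plain java.lang.Runnable/Consumer are not enough here: lambdas handed to WAL and store-file code need to be able to throw checked IOExceptions.

import java.io.IOException;

public class IOExceptionFunctionalSketch {

  // Mirrors org.apache.hadoop.hbase.util.IOExceptionRunnable added by this patch.
  @FunctionalInterface
  interface IOExceptionRunnable {
    void run() throws IOException;
  }

  // Mirrors org.apache.hadoop.hbase.util.IOExceptionConsumer added by this patch.
  @FunctionalInterface
  interface IOExceptionConsumer<T> {
    void accept(T t) throws IOException;
  }

  // Illustrative helper (not in the patch): run an I/O action and always run the
  // cleanup action, letting checked IOExceptions propagate to the caller.
  static void runThenCleanup(IOExceptionRunnable action, IOExceptionRunnable cleanup)
      throws IOException {
    try {
      action.run();
    } finally {
      cleanup.run();
    }
  }

  public static void main(String[] args) throws IOException {
    IOExceptionConsumer<String> sink = line -> System.out.println("append: " + line);
    sink.accept("edit-1");
    runThenCleanup(() -> sink.accept("edit-2"), () -> System.out.println("close writer"));
  }
}

With java.lang.Runnable or java.util.function.Consumer the lambdas above would not compile, since those interfaces cannot declare checked exceptions; that is the reason the patch defines these hbase-common variants instead of reusing the JDK types.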
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index b3445ab42423..acf3231d4e90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; @@ -109,6 +110,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -187,6 +189,10 @@ public abstract class AbstractFSWAL implements WAL { */ protected final Path walDir; + private final FileSystem remoteFs; + + private final Path remoteWALDir; + /** * dir path where old logs are kept. */ @@ -439,6 +445,10 @@ private static final class WALProps { protected boolean shouldShutDownConsumeExecutorWhenClose = true; + private volatile boolean skipRemoteWAL = false; + + private volatile boolean markerEditOnly = false; + public long getFilenum() { return this.filenum.get(); } @@ -502,22 +512,18 @@ protected final void createSingleThreadPoolConsumeExecutor(String walType, final this.shouldShutDownConsumeExecutorWhenClose = true; } - protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) - throws FailedLogCloseException, IOException { - this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); - } - protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) throws FailedLogCloseException, IOException { + final String suffix, FileSystem remoteFs, Path remoteWALDir) + throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); this.conf = conf; this.abortable = abortable; + this.remoteFs = remoteFs; + this.remoteWALDir = remoteWALDir; if (!fs.exists(walDir) && !fs.mkdirs(walDir)) { throw new IOException("Unable to mkdir " + walDir); @@ -633,6 +639,7 @@ public boolean accept(final Path fileName) { /** * Used to initialize the WAL. Usually just call rollWriter to create the first log writer. */ + @Override public void init() throws IOException { rollWriter(); } @@ -1032,6 +1039,37 @@ private IOException convertInterruptedExceptionToIOException(final InterruptedEx return ioe; } + private W createCombinedWriter(W localWriter, Path localPath) + throws IOException, CommonFSUtils.StreamLacksCapabilityException { + // retry forever if we can not create the remote writer to prevent aborting the RS due to log + // rolling error, unless the skipRemoteWal is set to true. 
+ // TODO: since for now we only have one thread doing log rolling, this may block the rolling for + // other wals + Path remoteWAL = new Path(remoteWALDir, localPath.getName()); + for (int retry = 0;; retry++) { + if (skipRemoteWAL) { + return localWriter; + } + W remoteWriter; + try { + remoteWriter = createWriterInstance(remoteFs, remoteWAL); + } catch (IOException e) { + LOG.warn("create remote writer {} failed, retry = {}", remoteWAL, retry, e); + try { + Thread.sleep(ConnectionUtils.getPauseTime(100, retry)); + } catch (InterruptedException ie) { + // restore the interrupt state + Thread.currentThread().interrupt(); + // must close local writer here otherwise no one will close it for us + Closeables.close(localWriter, true); + throw (IOException) new InterruptedIOException().initCause(ie); + } + continue; + } + return createCombinedWriter(localWriter, remoteWriter); + } + } + private Map> rollWriterInternal(boolean force) throws IOException { rollWriterLock.lock(); try { @@ -1047,7 +1085,11 @@ private Map> rollWriterInternal(boolean force) throws IOExc Path oldPath = getOldPath(); Path newPath = getNewPath(); // Any exception from here on is catastrophic, non-recoverable so we currently abort. - W nextWriter = this.createWriterInstance(newPath); + W nextWriter = this.createWriterInstance(fs, newPath); + if (remoteFs != null) { + // create a remote wal if necessary + nextWriter = createCombinedWriter(nextWriter, newPath); + } tellListenersAboutPreLogRoll(oldPath, newPath); // NewPath could be equal to oldPath if replaceWriter fails. newPath = replaceWriter(oldPath, newPath, nextWriter); @@ -1771,7 +1813,9 @@ private void consume() { /** * This method is used to be compatible with the original logic of {@link AsyncFSWAL}. */ - preAppendAndSync(); + if (markerEditOnly) { + drainNonMarkerEditsAndFailSyncs(); + } try { appendAndSync(); } catch (IOException exception) { @@ -1817,9 +1861,6 @@ private void consume() { consumeExecutor.execute(consumer); } - protected void preAppendAndSync() { - } - private boolean shouldScheduleConsumer() { int currentEpochAndState = epochAndState; if (writerBroken(currentEpochAndState) || waitingRoll(currentEpochAndState)) { @@ -1860,7 +1901,9 @@ private boolean shouldScheduleConsumer() { */ protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore) throws IOException { - precheckBeforeAppendWALEdit(hri, key, edits, inMemstore); + if (markerEditOnly && !edits.isMetaEdit()) { + throw new IOException("WAL is closing, only marker edit is allowed"); + } long txid = stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); if (shouldScheduleConsumer()) { @@ -1869,10 +1912,6 @@ protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inM return txid; } - protected void precheckBeforeAppendWALEdit(RegionInfo hri, WALKeyImpl key, WALEdit edits, - boolean inMemstore) throws IOException { - } - protected void doSync(boolean forceSync) throws IOException { long txid = waitingConsumePayloads.next(); SyncFuture future; @@ -1909,9 +1948,53 @@ protected void doSync(long txid, boolean forceSync) throws IOException { blockOnSync(future); } - protected abstract W createWriterInstance(Path path) + private void drainNonMarkerEditsAndFailSyncs() { + if (toWriteAppends.isEmpty()) { + return; + } + boolean hasNonMarkerEdits = false; + Iterator iter = toWriteAppends.descendingIterator(); + while (iter.hasNext()) { + FSWALEntry entry = iter.next(); + if (!entry.getEdit().isMetaEdit()) { + 
entry.release(); + hasNonMarkerEdits = true; + break; + } + } + if (hasNonMarkerEdits) { + for (;;) { + iter.remove(); + if (!iter.hasNext()) { + break; + } + iter.next().release(); + } + for (FSWALEntry entry : unackedAppends) { + entry.release(); + } + unackedAppends.clear(); + // fail the sync futures which are under the txid of the first remaining edit, if none, fail + // all the sync futures. + long txid = toWriteAppends.isEmpty() ? Long.MAX_VALUE : toWriteAppends.peek().getTxid(); + IOException error = new IOException("WAL is closing, only marker edit is allowed"); + for (Iterator syncIter = syncFutures.iterator(); syncIter.hasNext();) { + SyncFuture future = syncIter.next(); + if (future.getTxid() < txid) { + markFutureDoneAndOffer(future, future.getTxid(), error); + syncIter.remove(); + } else { + break; + } + } + } + } + + protected abstract W createWriterInstance(FileSystem fs, Path path) throws IOException, CommonFSUtils.StreamLacksCapabilityException; + protected abstract W createCombinedWriter(W localWriter, W remoteWriter); + protected final void waitForSafePoint() { consumeLock.lock(); try { @@ -2125,6 +2208,23 @@ public void checkLogLowReplication(long checkInterval) { } } + // Allow temporarily skipping the creation of remote writer. When failing to write to the remote + // dfs cluster, we need to reopen the regions and switch to use the original wal writer. But we + // need to write a close marker when closing a region, and if it fails, the whole rs will abort. + // So here we need to skip the creation of remote writer and make it possible to write the region + // close marker. + // Setting markerEdit only to true is for transiting from A to S, where we need to give up writing + // any pending wal entries as they will be discarded. The remote cluster will replicated the + // correct data back later. We still need to allow writing marker edits such as close region event + // to allow closing a region. 
+ @Override + public void skipRemoteWAL(boolean markerEditOnly) { + if (markerEditOnly) { + this.markerEditOnly = true; + } + this.skipRemoteWAL = true; + } + private static void split(final Configuration conf, final Path p) throws IOException { FileSystem fs = CommonFSUtils.getWALFileSystem(conf); if (!fs.exists(p)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 5de9d4d6b8d9..8d4afb322d5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.lang.reflect.Field; -import java.util.Iterator; import java.util.List; import java.util.Queue; import java.util.concurrent.CompletableFuture; @@ -28,12 +27,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -125,21 +121,13 @@ public class AsyncFSWAL extends AbstractFSWAL { private final StreamSlowMonitor streamSlowMonitor; - public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, String prefix, - String suffix, EventLoopGroup eventLoopGroup, Class channelClass) - throws FailedLogCloseException, IOException { - this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, "monitorForSuffix")); - } - public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) + boolean failIfWALExists, String prefix, String suffix, FileSystem remoteFs, Path remoteWALDir, + EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor) throws FailedLogCloseException, IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); + suffix, remoteFs, remoteWALDir); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; this.streamSlowMonitor = monitor; @@ -174,76 +162,13 @@ protected CompletableFuture doWriterSync(AsyncWriter writer, boolean shoul return writer.sync(shouldUseHsync); } - private void drainNonMarkerEditsAndFailSyncs() { - if (toWriteAppends.isEmpty()) { - return; - } - boolean hasNonMarkerEdits = false; - Iterator iter = toWriteAppends.descendingIterator(); - while (iter.hasNext()) { - FSWALEntry entry = iter.next(); - if (!entry.getEdit().isMetaEdit()) { - entry.release(); - hasNonMarkerEdits = true; - break; - } - } - if (hasNonMarkerEdits) { - for (;;) { - iter.remove(); - if (!iter.hasNext()) { - break; - } - iter.next().release(); - } - for (FSWALEntry entry : unackedAppends) { - 
entry.release(); - } - unackedAppends.clear(); - // fail the sync futures which are under the txid of the first remaining edit, if none, fail - // all the sync futures. - long txid = toWriteAppends.isEmpty() ? Long.MAX_VALUE : toWriteAppends.peek().getTxid(); - IOException error = new IOException("WAL is closing, only marker edit is allowed"); - for (Iterator syncIter = syncFutures.iterator(); syncIter.hasNext();) { - SyncFuture future = syncIter.next(); - if (future.getTxid() < txid) { - markFutureDoneAndOffer(future, future.getTxid(), error); - syncIter.remove(); - } else { - break; - } - } - } - } - - @Override - protected void preAppendAndSync() { - if (markerEditOnly()) { - drainNonMarkerEditsAndFailSyncs(); - } - } - - // This is used by sync replication, where we are going to close the wal soon after we reopen all - // the regions. Will be overridden by sub classes. - protected boolean markerEditOnly() { - return false; - } - - @Override - protected void precheckBeforeAppendWALEdit(RegionInfo hri, WALKeyImpl key, WALEdit edits, - boolean inMemstore) throws IOException { - if (markerEditOnly() && !edits.isMetaEdit()) { - throw new IOException("WAL is closing, only marker edit is allowed"); - } - } - protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException { return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, this.blocksize, eventLoopGroup, channelClass, streamSlowMonitor); } @Override - protected AsyncWriter createWriterInstance(Path path) throws IOException { + protected AsyncWriter createWriterInstance(FileSystem fs, Path path) throws IOException { return createAsyncWriter(fs, path); } @@ -277,4 +202,10 @@ protected boolean doCheckLogLowReplication() { AsyncFSOutput output = this.fsOut; return output != null && output.isBroken(); } + + @Override + protected AsyncWriter createCombinedWriter(AsyncWriter localWriter, AsyncWriter remoteWriter) { + // put remote writer first as usually it will cost more time to finish, so we write to it first + return CombinedAsyncWriter.create(remoteWriter, localWriter); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java index e47b2c3a2f6b..d4af7dcdda28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java @@ -19,14 +19,11 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; -import java.io.IOException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; @@ -34,43 +31,11 @@ * An {@link AsyncWriter} wrapper which writes data to a set of {@link AsyncWriter} instances. 
*/ @InterfaceAudience.Private -public final class CombinedAsyncWriter implements AsyncWriter { - - private static final Logger LOG = LoggerFactory.getLogger(CombinedAsyncWriter.class); - - private final ImmutableList writers; +public final class CombinedAsyncWriter extends CombinedWriterBase + implements AsyncWriter { private CombinedAsyncWriter(ImmutableList writers) { - this.writers = writers; - } - - @Override - public long getLength() { - return writers.get(0).getLength(); - } - - @Override - public long getSyncedLength() { - return writers.get(0).getSyncedLength(); - } - - @Override - public void close() throws IOException { - Exception error = null; - for (AsyncWriter writer : writers) { - try { - writer.close(); - } catch (Exception e) { - LOG.warn("close writer failed", e); - if (error == null) { - error = e; - } - } - } - if (error != null) { - throw new IOException("Failed to close at least one writer, please see the warn log above. " - + "The cause is the first exception occurred", error); - } + super(writers); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriter.java new file mode 100644 index 000000000000..1a38b5a1f4df --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriter.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver.wal; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.hadoop.hbase.wal.WALProvider.Writer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +@InterfaceAudience.Private +public final class CombinedWriter extends CombinedWriterBase implements Writer { + + private final ImmutableList executors; + + private CombinedWriter(ImmutableList writers) { + super(writers); + ImmutableList.Builder builder = + ImmutableList.builderWithExpectedSize(writers.size() - 1); + for (int i = 0; i < writers.size() - 1; i++) { + Writer writer = writers.get(i); + builder.add(Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setNameFormat("WAL-Writer-" + writer + "-%d").setDaemon(true).build())); + } + this.executors = builder.build(); + } + + private interface Action { + void action(Writer writer) throws IOException; + } + + private void apply(Action action) throws IOException { + List> futures = new ArrayList<>(writers.size() - 1); + for (int i = 0; i < writers.size() - 1; i++) { + Writer writer = writers.get(i); + futures.add(executors.get(i).submit(new Callable() { + + @Override + public Void call() throws Exception { + action.action(writer); + return null; + } + })); + } + action.action(writers.get(writers.size() - 1)); + for (Future future : futures) { + FutureUtils.get(future); + } + } + + @Override + public void sync(boolean forceSync) throws IOException { + apply(writer -> writer.sync(forceSync)); + } + + @Override + public void append(Entry entry) throws IOException { + apply(writer -> writer.append(entry)); + } + + @Override + public void close() throws IOException { + executors.forEach(ExecutorService::shutdown); + super.close(); + } + + public static CombinedWriter create(Writer writer, Writer... writers) { + return new CombinedWriter(ImmutableList. builder().add(writer).add(writers).build()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriterBase.java new file mode 100644 index 000000000000..76244dbe941a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedWriterBase.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver.wal; + +import java.io.IOException; +import org.apache.hadoop.hbase.wal.WALProvider.WriterBase; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + +/** + * Base class for combined wal writer implementations. + */ +@InterfaceAudience.Private +public class CombinedWriterBase implements WriterBase { + + private static final Logger LOG = LoggerFactory.getLogger(CombinedWriterBase.class); + + // the order of this list is not critical now as we have already solved the case where writing to + // local succeed but remote fail, so implementation should implement concurrent sync to increase + // performance + protected final ImmutableList writers; + + protected CombinedWriterBase(ImmutableList writers) { + this.writers = writers; + } + + @Override + public void close() throws IOException { + Exception error = null; + for (T writer : writers) { + try { + writer.close(); + } catch (Exception e) { + LOG.warn("close writer failed", e); + if (error == null) { + error = e; + } + } + } + if (error != null) { + throw new IOException("Failed to close at least one writer, please see the warn log above. " + + "The cause is the first exception occurred", error); + } + } + + @Override + public long getLength() { + return writers.get(0).getLength(); + } + + @Override + public long getSyncedLength() { + return writers.get(0).getSyncedLength(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java deleted file mode 100644 index 467675f770f6..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver.wal; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.ConnectionUtils; -import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; -import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; - -/** - * An AsyncFSWAL which writes data to two filesystems. - */ -@InterfaceAudience.Private -public class DualAsyncFSWAL extends AsyncFSWAL { - - private static final Logger LOG = LoggerFactory.getLogger(DualAsyncFSWAL.class); - - private final FileSystem remoteFs; - - private final Path remoteWALDir; - - private volatile boolean skipRemoteWAL = false; - - private volatile boolean markerEditOnly = false; - - public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteWALDir, - String logDir, String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); - this.remoteFs = remoteFs; - this.remoteWALDir = remoteWALDir; - } - - // will be overridden in testcase - protected AsyncWriter createCombinedAsyncWriter(AsyncWriter localWriter, - AsyncWriter remoteWriter) { - return CombinedAsyncWriter.create(remoteWriter, localWriter); - } - - @Override - protected AsyncWriter createWriterInstance(Path path) throws IOException { - AsyncWriter localWriter = super.createWriterInstance(path); - // retry forever if we can not create the remote writer to prevent aborting the RS due to log - // rolling error, unless the skipRemoteWal is set to true. - // TODO: since for now we only have one thread doing log rolling, this may block the rolling for - // other wals - Path remoteWAL = new Path(remoteWALDir, path.getName()); - for (int retry = 0;; retry++) { - if (skipRemoteWAL) { - return localWriter; - } - AsyncWriter remoteWriter; - try { - remoteWriter = createAsyncWriter(remoteFs, remoteWAL); - } catch (IOException e) { - LOG.warn("create remote writer {} failed, retry = {}", remoteWAL, retry, e); - try { - Thread.sleep(ConnectionUtils.getPauseTime(100, retry)); - } catch (InterruptedException ie) { - // restore the interrupt state - Thread.currentThread().interrupt(); - Closeables.close(localWriter, true); - throw (IOException) new InterruptedIOException().initCause(ie); - } - continue; - } - return createCombinedAsyncWriter(localWriter, remoteWriter); - } - } - - @Override - protected boolean markerEditOnly() { - return markerEditOnly; - } - - // Allow temporarily skipping the creation of remote writer. When failing to write to the remote - // dfs cluster, we need to reopen the regions and switch to use the original wal writer. But we - // need to write a close marker when closing a region, and if it fails, the whole rs will abort. - // So here we need to skip the creation of remote writer and make it possible to write the region - // close marker. 
- // Setting markerEdit only to true is for transiting from A to S, where we need to give up writing - // any pending wal entries as they will be discarded. The remote cluster will replicated the - // correct data back later. We still need to allow writing marker edits such as close region event - // to allow closing a region. - public void skipRemoteWAL(boolean markerEditOnly) { - if (markerEditOnly) { - this.markerEditOnly = true; - } - this.skipRemoteWAL = true; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 28e6a460316a..d0d5ce5f2e17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -154,13 +154,14 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, final Configuration conf) throws IOException { this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, - null); + null, null, null); } public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { - this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, + null, null); } /** @@ -185,9 +186,9 @@ public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) throws IOException { + final String suffix, FileSystem remoteFs, Path remoteWALDir) throws IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix); + suffix, remoteFs, remoteWALDir); this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); this.lowReplicationRollLimit = @@ -254,7 +255,7 @@ private void preemptiveSync(final ProtobufLogWriter nextWriter) { * @return Writer instance */ @Override - protected Writer createWriterInstance(final Path path) throws IOException { + protected Writer createWriterInstance(FileSystem fs, Path path) throws IOException { Writer writer = FSHLogProvider.createWriter(conf, fs, path, false, this.blocksize); if (writer instanceof ProtobufLogWriter) { preemptiveSync((ProtobufLogWriter) writer); @@ -610,4 +611,9 @@ void setWriter(Writer writer) { this.writer = writer; } + @Override + protected Writer createCombinedWriter(Writer localWriter, Writer remoteWriter) { + // put remote writer first as usually it will cost more time to finish, so we write to it first + return CombinedWriter.create(remoteWriter, localWriter); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java index b55699331c2b..fb96eb9011bf 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java @@ -21,9 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Get notification for replication peer events. Mainly used for telling the - * {@link org.apache.hadoop.hbase.wal.SyncReplicationWALProvider} to close some WAL if not used any - * more. + * Get notification for replication peer events. Mainly used for telling the {@code WALProvider} to + * close some remote WAL if not used any more. */ @InterfaceAudience.Private public interface PeerActionListener { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 6279c4b9596c..1ed92e2fd427 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; @@ -118,35 +117,34 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.replicationLoad = new ReplicationLoad(); + this.syncReplicationPeerInfoProvider = new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping); - PeerActionListener peerActionListener = PeerActionListener.DUMMY; // Get the user-space WAL provider WALProvider walProvider = walFactory != null ? walFactory.getWALProvider() : null; if (walProvider != null) { walProvider .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); - if (walProvider instanceof SyncReplicationWALProvider) { - SyncReplicationWALProvider syncWALProvider = (SyncReplicationWALProvider) walProvider; - peerActionListener = syncWALProvider; - syncWALProvider.setPeerInfoProvider(syncReplicationPeerInfoProvider); - // for sync replication state change, we need to reload the state twice, you can see the - // code in PeerProcedureHandlerImpl, so here we need to go over the sync replication peers - // to see if any of them are in the middle of the two refreshes, if so, we need to manually - // repeat the action we have done in the first refresh, otherwise when the second refresh - // comes we will be in trouble, such as NPE. 
- replicationPeers.getAllPeerIds().stream().map(replicationPeers::getPeer) - .filter(p -> p.getPeerConfig().isSyncReplication()) - .filter(p -> p.getNewSyncReplicationState() != SyncReplicationState.NONE) - .forEach(p -> syncWALProvider.peerSyncReplicationStateChange(p.getId(), - p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); - } + PeerActionListener peerActionListener = walProvider.getPeerActionListener(); + walProvider.setSyncReplicationPeerInfoProvider(syncReplicationPeerInfoProvider); + // for sync replication state change, we need to reload the state twice, you can see the + // code in PeerProcedureHandlerImpl, so here we need to go over the sync replication peers + // to see if any of them are in the middle of the two refreshes, if so, we need to manually + // repeat the action we have done in the first refresh, otherwise when the second refresh + // comes we will be in trouble, such as NPE. + replicationPeers.getAllPeerIds().stream().map(replicationPeers::getPeer) + .filter(p -> p.getPeerConfig().isSyncReplication()) + .filter(p -> p.getNewSyncReplicationState() != SyncReplicationState.NONE) + .forEach(p -> peerActionListener.peerSyncReplicationStateChange(p.getId(), + p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); + this.peerProcedureHandler = + new PeerProcedureHandlerImpl(replicationManager, peerActionListener); + } else { + this.peerProcedureHandler = + new PeerProcedureHandlerImpl(replicationManager, PeerActionListener.DUMMY); } - this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); - this.replicationLoad = new ReplicationLoad(); - - this.peerProcedureHandler = - new PeerProcedureHandlerImpl(replicationManager, peerActionListener); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index d54cda92d901..ffaabe7e3399 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -66,7 +66,7 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.AbstractWALProvider; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; @@ -729,7 +729,7 @@ private void cleanOldLogs(NavigableSet wals, ReplicationSourceInterface // special format, and also, the peer id in its name should match the peer id for the // replication source. 
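The filter that follows depends on the remote WAL naming convention described in the comment just above: the peer id is embedded in the WAL file name. A minimal standalone sketch of that kind of extraction, assuming the "<factoryId>-<timestamp>-<peerId>" prefix produced by getRemoteWALPrefix later in this patch and assuming ".syncrep" as the value of ReplicationUtils.SYNC_WAL_SUFFIX; the class and sample names below are illustrative only, not part of the change.

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class RemoteWalNameSketch {
  // same shape as the LOG_PREFIX_PATTERN used by AbstractWALProvider later in this patch
  private static final Pattern LOG_PREFIX_PATTERN = Pattern.compile(".*-\\d+-(.+)");
  // assumed value of ReplicationUtils.SYNC_WAL_SUFFIX
  private static final String SYNC_WAL_SUFFIX = ".syncrep";

  static Optional<String> peerIdFromWalName(String name) {
    if (!name.endsWith(SYNC_WAL_SUFFIX)) {
      // only remote (sync replication) WALs carry the suffix
      return Optional.empty();
    }
    String prefix = name.substring(0, name.length() - SYNC_WAL_SUFFIX.length());
    Matcher matcher = LOG_PREFIX_PATTERN.matcher(prefix);
    return matcher.matches() ? Optional.of(matcher.group(1)) : Optional.empty();
  }

  public static void main(String[] args) {
    // prints Optional[peer1]
    System.out.println(peerIdFromWalName("rs1%2C16020%2C1-1700000000000-peer1.syncrep"));
    // prints Optional.empty: a normal WAL name without the sync replication suffix
    System.out.println(peerIdFromWalName("rs1%2C16020%2C1.1700000000000"));
  }
}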
List remoteWals = - wals.stream().filter(w -> SyncReplicationWALProvider.getSyncReplicationPeerIdFromWALName(w) + wals.stream().filter(w -> AbstractWALProvider.getSyncReplicationPeerIdFromWALName(w) .map(peerId::equals).orElse(false)).collect(Collectors.toList()); LOG.debug("Removing {} logs from remote dir {} in the list: {}", remoteWals.size(), remoteWALDir, remoteWals); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index cd6a4d9ac4d1..a4ca20fa7311 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -272,11 +272,12 @@ public boolean isAborted() { getConf().setClass(ReplicationStorageFactory.REPLICATION_QUEUE_IMPL, OfflineTableReplicationQueueStorage.class, ReplicationQueueStorage.class); DummyServer server = new DummyServer(getConf(), zkw); - replication.initialize(server, fs, new Path(logDir, server.toString()), oldLogDir, - new WALFactory(conf, - ServerName - .valueOf(getClass().getSimpleName() + ",16010," + EnvironmentEdgeManager.currentTime()), - null, false)); + replication + .initialize(server, fs, new Path(logDir, server.toString()), oldLogDir, + new WALFactory(conf, + ServerName.valueOf( + getClass().getSimpleName() + ",16010," + EnvironmentEdgeManager.currentTime()), + null)); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init(); Set regionServers = listRegionServers(fs, logDir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 5dc40dd60493..13d2886182e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -26,7 +26,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; @@ -35,7 +34,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -64,7 +62,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class AbstractFSWALProvider> implements WALProvider { +public abstract class AbstractFSWALProvider> + extends AbstractWALProvider { private static final Logger LOG = LoggerFactory.getLogger(AbstractFSWALProvider.class); @@ -84,14 +83,6 @@ public interface Initializer { } protected volatile T wal; - protected WALFactory factory; - protected Configuration conf; - protected List listeners = new ArrayList<>(); - protected String providerId; - protected AtomicBoolean initialized = new AtomicBoolean(false); - // for default wal provider, logPrefix won't change - protected String logPrefix; - protected Abortable abortable; /** * We use walCreateLock to prevent wal recreation in different threads, and also prevent getWALs @@ -106,13 +97,8 @@ public interface Initializer { * null 
*/ @Override - public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + protected void doInit(WALFactory factory, Configuration conf, String providerId) throws IOException { - if (!initialized.compareAndSet(false, true)) { - throw new IllegalStateException("WALProvider.init should only be called once."); - } - this.factory = factory; - this.conf = conf; this.providerId = providerId; // get log prefix StringBuilder sb = new StringBuilder().append(factory.factoryId); @@ -124,12 +110,11 @@ public void init(WALFactory factory, Configuration conf, String providerId, Abor } } logPrefix = sb.toString(); - this.abortable = abortable; doInit(conf); } @Override - public List getWALs() { + protected List getWALs0() { if (wal != null) { return Lists.newArrayList(wal); } @@ -146,7 +131,7 @@ public List getWALs() { } @Override - public T getWAL(RegionInfo region) throws IOException { + protected T getWAL0(RegionInfo region) throws IOException { T walCopy = wal; if (walCopy != null) { return walCopy; @@ -158,15 +143,7 @@ public T getWAL(RegionInfo region) throws IOException { return walCopy; } walCopy = createWAL(); - boolean succ = false; - try { - walCopy.init(); - succ = true; - } finally { - if (!succ) { - walCopy.close(); - } - } + initWAL(walCopy); wal = walCopy; return walCopy; } finally { @@ -179,7 +156,7 @@ public T getWAL(RegionInfo region) throws IOException { protected abstract void doInit(Configuration conf) throws IOException; @Override - public void shutdown() throws IOException { + protected void shutdown0() throws IOException { T log = this.wal; if (log != null) { log.shutdown(); @@ -187,7 +164,7 @@ public void shutdown() throws IOException { } @Override - public void close() throws IOException { + protected void close0() throws IOException { T log = this.wal; if (log != null) { log.close(); @@ -199,7 +176,7 @@ public void close() throws IOException { * number of files (rolled and active). if either of them aren't, count 0 for that provider. */ @Override - public long getNumLogFiles() { + protected long getNumLogFiles0() { T log = this.wal; return log == null ? 0 : log.getNumLogFiles(); } @@ -209,11 +186,19 @@ public long getNumLogFiles() { * size of files (only rolled). if either of them aren't, count 0 for that provider. */ @Override - public long getLogFileSize() { + protected long getLogFileSize0() { T log = this.wal; return log == null ? 0 : log.getLogFileSize(); } + @Override + protected WAL createRemoteWAL(RegionInfo region, FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException { + // so we do not need to add this for a lot of test classes, for normal WALProvider, you should + // implement this method to support sync replication. + throw new UnsupportedOperationException(); + } + /** * returns the number of rolled WAL files. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java similarity index 50% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java index e94d87aa698c..e9c63fb52170 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java @@ -17,12 +17,7 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALArchiveDirectoryName; -import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALDirectoryName; - import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -36,132 +31,113 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener; import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider; -import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.IOExceptionConsumer; +import org.apache.hadoop.hbase.util.IOExceptionRunnable; import org.apache.hadoop.hbase.util.KeyLocker; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.MultipleIOException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.collect.Streams; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; -import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; /** - * The special {@link WALProvider} for synchronous replication. + * Base class for a WAL Provider. + *
<p>
+ * We will put some common logic here, especially for sync replication implementation, as it must do + * some hacks before the normal wal creation operation. *
<p>
- * It works like an interceptor, when getting WAL, first it will check if the given region should be - * replicated synchronously, if so it will return a special WAL for it, otherwise it will delegate - * the request to the normal {@link WALProvider}. + * All {@link WALProvider} implementations should extends this class instead of implement + * {@link WALProvider} directly, except {@link DisabledWALProvider}. */ @InterfaceAudience.Private -public class SyncReplicationWALProvider implements WALProvider, PeerActionListener { - - private static final Logger LOG = LoggerFactory.getLogger(SyncReplicationWALProvider.class); - - // only for injecting errors for testcase, do not use it for other purpose. - public static final String DUAL_WAL_IMPL = "hbase.wal.sync.impl"; - - private final WALProvider provider; - - private SyncReplicationPeerInfoProvider peerInfoProvider = - new DefaultSyncReplicationPeerInfoProvider(); - - private WALFactory factory; - - private Configuration conf; +public abstract class AbstractWALProvider implements WALProvider, PeerActionListener { - private List listeners = new ArrayList<>(); + private static final Logger LOG = LoggerFactory.getLogger(AbstractWALProvider.class); - private EventLoopGroup eventLoopGroup; + // should be package private; more visible for use in AbstractFSWAL + public static final String WAL_FILE_NAME_DELIMITER = "."; - private Class channelClass; - - private AtomicBoolean initialized = new AtomicBoolean(false); + protected WALFactory factory; + protected Configuration conf; + protected List listeners = new ArrayList<>(); + protected String providerId; + protected AtomicBoolean initialized = new AtomicBoolean(false); + // for default wal provider, logPrefix won't change + protected String logPrefix; + protected Abortable abortable; // when switching from A to DA, we will put a Optional.empty into this map if there is no WAL for // the peer yet. When getting WAL from this map the caller should know that it should not use - // DualAsyncFSWAL any more. - private final ConcurrentMap> peerId2WAL = - new ConcurrentHashMap<>(); + // the remote WAL any more. + private final ConcurrentMap> peerId2WAL = new ConcurrentHashMap<>(); private final KeyLocker createLock = new KeyLocker<>(); - SyncReplicationWALProvider(WALProvider provider) { - this.provider = provider; - } + // we need to have this because when getting meta wal, there is no peer info provider yet. 
+ private SyncReplicationPeerInfoProvider peerInfoProvider = new SyncReplicationPeerInfoProvider() { - public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider) { - this.peerInfoProvider = peerInfoProvider; - } + @Override + public Optional> getPeerIdAndRemoteWALDir(TableName table) { + return Optional.empty(); + } + + @Override + public boolean checkState(TableName table, + BiPredicate checker) { + return false; + } + + }; @Override - public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) - throws IOException { + public final void init(WALFactory factory, Configuration conf, String providerId, + Abortable server) throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } - provider.init(factory, conf, providerId, abortable); - this.conf = conf; this.factory = factory; - Pair> eventLoopGroupAndChannelClass = - NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); - eventLoopGroup = eventLoopGroupAndChannelClass.getFirst(); - channelClass = eventLoopGroupAndChannelClass.getSecond(); + this.conf = conf; + this.abortable = server; + doInit(factory, conf, providerId); + } + + protected final void initWAL(WAL wal) throws IOException { + boolean succ = false; + try { + wal.init(); + succ = true; + } finally { + if (!succ) { + safeClose(wal); + } + } } // Use a timestamp to make it identical. That means, after we transit the peer to DA/S and then // back to A, the log prefix will be changed. This is used to simplify the implementation for // replication source, where we do not need to consider that a terminated shipper could be added // back. - private String getLogPrefix(String peerId) { + private String getRemoteWALPrefix(String peerId) { return factory.factoryId + "-" + EnvironmentEdgeManager.currentTime() + "-" + peerId; } - private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOException { - Class clazz = - conf.getClass(DUAL_WAL_IMPL, DualAsyncFSWAL.class, DualAsyncFSWAL.class); - try { - Constructor constructor = null; - for (Constructor c : clazz.getDeclaredConstructors()) { - if (c.getParameterCount() > 0) { - constructor = c; - break; - } - } - if (constructor == null) { - throw new IllegalArgumentException("No valid constructor provided for class " + clazz); - } - constructor.setAccessible(true); - return (DualAsyncFSWAL) constructor.newInstance(CommonFSUtils.getWALFileSystem(conf), - ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir), - CommonFSUtils.getWALRootDir(conf), - ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId), - getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), - conf, listeners, true, getLogPrefix(peerId), ReplicationUtils.SYNC_WAL_SUFFIX, - eventLoopGroup, channelClass); - } catch (InstantiationException | IllegalAccessException e) { - throw new RuntimeException(e); - } catch (InvocationTargetException e) { - Throwable cause = e.getTargetException(); - Throwables.propagateIfPossible(cause, IOException.class); - throw new RuntimeException(cause); - } - } - - private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOException { - Optional opt = peerId2WAL.get(peerId); + private WAL getRemoteWAL(RegionInfo region, String peerId, String remoteWALDir) + throws IOException { + Optional opt = peerId2WAL.get(peerId); if (opt != null) { return opt.orElse(null); } @@ -171,16 +147,10 @@ private DualAsyncFSWAL getWAL(String 
peerId, String remoteWALDir) throws IOExcep if (opt != null) { return opt.orElse(null); } - DualAsyncFSWAL wal = createWAL(peerId, remoteWALDir); - boolean succ = false; - try { - wal.init(); - succ = true; - } finally { - if (!succ) { - wal.close(); - } - } + WAL wal = createRemoteWAL(region, ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir), + ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId), getRemoteWALPrefix(peerId), + ReplicationUtils.SYNC_WAL_SUFFIX); + initWAL(wal); peerId2WAL.put(peerId, Optional.of(wal)); return wal; } finally { @@ -189,83 +159,66 @@ private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOExcep } @Override - public WAL getWAL(RegionInfo region) throws IOException { + public final WAL getWAL(RegionInfo region) throws IOException { if (region == null) { - return provider.getWAL(null); + return getWAL0(null); } - WAL wal = null; + // deal with sync replication Optional> peerIdAndRemoteWALDir = peerInfoProvider.getPeerIdAndRemoteWALDir(region.getTable()); if (peerIdAndRemoteWALDir.isPresent()) { Pair pair = peerIdAndRemoteWALDir.get(); - wal = getWAL(pair.getFirst(), pair.getSecond()); + WAL wal = getRemoteWAL(region, pair.getFirst(), pair.getSecond()); + if (wal != null) { + return wal; + } } - return wal != null ? wal : provider.getWAL(region); - } - - private Stream getWALStream() { - return Streams.concat( - peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get), - provider.getWALs().stream()); + // fallback to normal WALProvider logic + return getWAL0(region); } @Override - public List getWALs() { - return getWALStream().collect(Collectors.toList()); + public final List getWALs() { + return Streams + .concat(peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get), + getWALs0().stream()) + .collect(Collectors.toList()); } @Override - public void shutdown() throws IOException { - // save the last exception and rethrow - IOException failure = null; - for (Optional wal : peerId2WAL.values()) { - if (wal.isPresent()) { - try { - wal.get().shutdown(); - } catch (IOException e) { - LOG.error("Shutdown WAL failed", e); - failure = e; - } - } - } - provider.shutdown(); - if (failure != null) { - throw failure; - } + public PeerActionListener getPeerActionListener() { + return this; } @Override - public void close() throws IOException { - // save the last exception and rethrow - IOException failure = null; - for (Optional wal : peerId2WAL.values()) { - if (wal.isPresent()) { + public void peerSyncReplicationStateChange(String peerId, SyncReplicationState from, + SyncReplicationState to, int stage) { + if (from == SyncReplicationState.ACTIVE) { + if (stage == 0) { + Lock lock = createLock.acquireLock(peerId); try { - wal.get().close(); - } catch (IOException e) { - LOG.error("Close WAL failed", e); - failure = e; + Optional opt = peerId2WAL.get(peerId); + if (opt != null) { + opt.ifPresent(w -> w.skipRemoteWAL(to == SyncReplicationState.STANDBY)); + } else { + // add a place holder to tell the getWAL caller do not use the remote WAL any more. 
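The statement that follows records Optional.empty() as exactly such a placeholder. A minimal standalone sketch of the idiom, assuming a ConcurrentMap where a missing key means "no decision yet", an empty value means "stop using the remote WAL", and a present value is the cached remote WAL; a String stands in for the WAL handle and all names are illustrative only.

import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class RemoteWalPlaceholderSketch {
  // missing key   -> no decision recorded yet, a remote WAL may still be created
  // empty value   -> peer has left ACTIVE state, do not create or use a remote WAL
  // present value -> reuse the cached remote WAL
  private final ConcurrentMap<String, Optional<String>> peerId2Wal = new ConcurrentHashMap<>();

  void markDoNotUseRemoteWal(String peerId) {
    // the placeholder: later lookups see the empty Optional and skip the remote WAL
    peerId2Wal.putIfAbsent(peerId, Optional.empty());
  }

  // returns the remote WAL to write to, or null when the placeholder says not to use one
  String getRemoteWal(String peerId) {
    Optional<String> cached = peerId2Wal.get(peerId);
    if (cached != null) {
      return cached.orElse(null);
    }
    // no decision yet: lazily create and cache (the patch does this under a per-peer KeyLocker)
    return peerId2Wal.computeIfAbsent(peerId, p -> Optional.of("remote-wal-" + p)).orElse(null);
  }

  public static void main(String[] args) {
    RemoteWalPlaceholderSketch sketch = new RemoteWalPlaceholderSketch();
    System.out.println(sketch.getRemoteWal("peer1")); // remote-wal-peer1
    sketch.markDoNotUseRemoteWal("peer2");
    System.out.println(sketch.getRemoteWal("peer2")); // null, the placeholder wins
  }
}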
+ peerId2WAL.put(peerId, Optional.empty()); + } + } finally { + lock.unlock(); } + } else if (stage == 1) { + peerId2WAL.remove(peerId).ifPresent(AbstractWALProvider::safeClose); } } - provider.close(); - if (failure != null) { - throw failure; - } } @Override - public long getNumLogFiles() { - return peerId2WAL.size() + provider.getNumLogFiles(); + public void setSyncReplicationPeerInfoProvider(SyncReplicationPeerInfoProvider provider) { + this.peerInfoProvider = provider; } - @Override - public long getLogFileSize() { - return peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get) - .mapToLong(DualAsyncFSWAL::getLogFileSize).sum() + provider.getLogFileSize(); - } - - private void safeClose(WAL wal) { + private static void safeClose(WAL wal) { if (wal != null) { try { wal.close(); @@ -278,45 +231,55 @@ private void safeClose(WAL wal) { @Override public void addWALActionsListener(WALActionsListener listener) { listeners.add(listener); - provider.addWALActionsListener(listener); } - @Override - public void peerSyncReplicationStateChange(String peerId, SyncReplicationState from, - SyncReplicationState to, int stage) { - if (from == SyncReplicationState.ACTIVE) { - if (stage == 0) { - Lock lock = createLock.acquireLock(peerId); + private void cleanup(IOExceptionConsumer cleanupWAL, IOExceptionRunnable finalCleanup) + throws IOException { + MultipleIOException.Builder builder = new MultipleIOException.Builder(); + for (Optional wal : peerId2WAL.values()) { + if (wal.isPresent()) { try { - Optional opt = peerId2WAL.get(peerId); - if (opt != null) { - opt.ifPresent(w -> w.skipRemoteWAL(to == SyncReplicationState.STANDBY)); - } else { - // add a place holder to tell the getWAL caller do not use DualAsyncFSWAL any more. - peerId2WAL.put(peerId, Optional.empty()); - } - } finally { - lock.unlock(); + cleanupWAL.accept(wal.get()); + } catch (IOException e) { + LOG.error("cleanup WAL failed", e); + builder.add(e); } - } else if (stage == 1) { - peerId2WAL.remove(peerId).ifPresent(this::safeClose); } } + try { + finalCleanup.run(); + } catch (IOException e) { + LOG.error("cleanup WAL failed", e); + builder.add(e); + } + if (!builder.isEmpty()) { + throw builder.build(); + } } - private static class DefaultSyncReplicationPeerInfoProvider - implements SyncReplicationPeerInfoProvider { + @Override + public final void shutdown() throws IOException { + cleanup(WAL::shutdown, this::shutdown0); + } - @Override - public Optional> getPeerIdAndRemoteWALDir(TableName table) { - return Optional.empty(); - } + @Override + public final void close() throws IOException { + cleanup(WAL::close, this::close0); + } - @Override - public boolean checkState(TableName table, - BiPredicate checker) { - return false; - } + private Stream> remoteWALStream() { + return peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get) + .filter(w -> w instanceof AbstractFSWAL).map(w -> (AbstractFSWAL) w); + } + + @Override + public final long getNumLogFiles() { + return remoteWALStream().mapToLong(AbstractFSWAL::getNumLogFiles).sum() + getNumLogFiles0(); + } + + @Override + public final long getLogFileSize() { + return remoteWALStream().mapToLong(AbstractFSWAL::getLogFileSize).sum() + getLogFileSize0(); } private static final Pattern LOG_PREFIX_PATTERN = Pattern.compile(".*-\\d+-(.+)"); @@ -343,8 +306,22 @@ public static Optional getSyncReplicationPeerIdFromWALName(String name) } } - WALProvider getWrappedProvider() { - return provider; - } + protected abstract WAL createRemoteWAL(RegionInfo region, 
FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException; + + protected abstract void doInit(WALFactory factory, Configuration conf, String providerId) + throws IOException; + + protected abstract WAL getWAL0(RegionInfo region) throws IOException; + + protected abstract List getWALs0(); + + protected abstract void shutdown0() throws IOException; + + protected abstract void close0() throws IOException; + + protected abstract long getNumLogFiles0(); + + protected abstract long getLogFileSize0(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 2071d78e843d..678e75541be8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput; import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper; import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper; @@ -77,8 +78,19 @@ protected AsyncFSWAL createWAL() throws IOException { return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, eventLoopGroup, - channelClass, factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null, null, null, + eventLoopGroup, channelClass, + factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); + } + + @Override + protected WAL createRemoteWAL(RegionInfo region, FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException { + return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), + getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, prefix, suffix, + remoteFs, remoteWALDir, eventLoopGroup, channelClass, + factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 8384f8f511a8..979354191514 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter; @@ -100,10 +101,20 @@ protected FSHLog createWAL() throws IOException { return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null, null, null); + } + + @Override + protected WAL createRemoteWAL(RegionInfo region, FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException { + return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), + getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, prefix, suffix, + remoteFs, remoteWALDir); } @Override protected void doInit(Configuration conf) throws IOException { } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 4e4748be3a0a..226e87b4eec2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -18,21 +18,19 @@ package org.apache.hadoop.hbase.wal; import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.META_WAL_PROVIDER_ID; -import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; -import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.KeyLocker; import org.apache.yetus.audience.InterfaceAudience; @@ -53,7 +51,7 @@ * Optionally, a FQCN to a custom implementation may be given. 
*/ @InterfaceAudience.Private -public class RegionGroupingProvider implements WALProvider { +public class RegionGroupingProvider extends AbstractWALProvider { private static final Logger LOG = LoggerFactory.getLogger(RegionGroupingProvider.class); /** @@ -130,22 +128,15 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, private final KeyLocker createLock = new KeyLocker<>(); private RegionGroupingStrategy strategy; - private WALFactory factory; - private Configuration conf; - private List listeners = new ArrayList<>(); - private String providerId; + private Class providerClass; - private Abortable abortable; @Override - public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + protected void doInit(WALFactory factory, Configuration conf, String providerId) throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } - this.conf = conf; - this.factory = factory; - this.abortable = abortable; if (META_WAL_PROVIDER_ID.equals(providerId)) { // do not change the provider id if it is for meta @@ -177,12 +168,22 @@ private WALProvider createProvider(String group) throws IOException { return provider; } - @Override - public List getWALs() { - return cached.values().stream().flatMap(p -> p.getWALs().stream()).collect(Collectors.toList()); - } - - private WAL getWAL(String group) throws IOException { + private WALProvider getWALProvider(RegionInfo region) throws IOException { + String group; + if (META_WAL_PROVIDER_ID.equals(this.providerId)) { + group = META_WAL_GROUP_NAME; + } else { + byte[] id; + byte[] namespace; + if (region != null) { + id = region.getEncodedNameAsBytes(); + namespace = region.getTable().getNamespace(); + } else { + id = HConstants.EMPTY_BYTE_ARRAY; + namespace = null; + } + group = strategy.group(id, namespace); + } WALProvider provider = cached.get(group); if (provider == null) { Lock lock = createLock.acquireLock(group); @@ -190,6 +191,12 @@ private WAL getWAL(String group) throws IOException { provider = cached.get(group); if (provider == null) { provider = createProvider(group); + // Notice that there is an assumption that the addWALActionsListener method must be called + // before the getWAL method, so we can make sure there is no sub WALProvider yet, so we + // only add the listener to our listeners list without calling addWALActionListener for + // each WALProvider. Although it is no hurt to execute an extra loop to call + // addWALActionListener for each WALProvider, but if the extra code actually works, then + // we will have other big problems. So leave it as is. 
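The two statements that follow attach the already-registered listeners to the newly created sub-provider and publish it in the cache. A minimal standalone sketch of this lazily-created, per-key-locked cache, using a plain ReentrantLock per group in place of HBase's KeyLocker and Strings in place of WALProvider instances; all names here are illustrative only.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class GroupedProviderCacheSketch {
  private final ConcurrentMap<String, String> cached = new ConcurrentHashMap<>();
  // stand-in for HBase's KeyLocker: one lock per group key
  private final ConcurrentMap<String, Lock> locks = new ConcurrentHashMap<>();

  String getProvider(String group) {
    String provider = cached.get(group);
    if (provider != null) {
      return provider;
    }
    Lock lock = locks.computeIfAbsent(group, g -> new ReentrantLock());
    lock.lock();
    try {
      // re-check under the lock so only one thread creates the provider for this group
      provider = cached.get(group);
      if (provider == null) {
        provider = "provider-for-" + group; // stand-in for createProvider(group)
        // in the hunk above, listeners are attached here before the provider is published
        cached.put(group, provider);
      }
      return provider;
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    GroupedProviderCacheSketch sketch = new GroupedProviderCacheSketch();
    System.out.println(sketch.getProvider("group-a"));
    System.out.println(sketch.getProvider("group-a")); // same cached value, created only once
  }
}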
listeners.forEach(provider::addWALActionsListener); cached.put(group, provider); } @@ -197,11 +204,11 @@ private WAL getWAL(String group) throws IOException { lock.unlock(); } } - return provider.getWAL(null); + return provider; } @Override - public WAL getWAL(RegionInfo region) throws IOException { + protected WAL getWAL0(RegionInfo region) throws IOException { String group; if (META_WAL_PROVIDER_ID.equals(this.providerId)) { group = META_WAL_GROUP_NAME; @@ -221,7 +228,53 @@ public WAL getWAL(RegionInfo region) throws IOException { } @Override - public void shutdown() throws IOException { + protected List getWALs0() { + return cached.values().stream().flatMap(p -> p.getWALs().stream()).collect(Collectors.toList()); + } + + private WAL getWAL(String group) throws IOException { + WALProvider provider = cached.get(group); + if (provider == null) { + Lock lock = createLock.acquireLock(group); + try { + provider = cached.get(group); + if (provider == null) { + provider = createProvider(group); + listeners.forEach(provider::addWALActionsListener); + cached.put(group, provider); + } + } finally { + lock.unlock(); + } + } + return provider.getWAL(null); + } + + static class IdentityGroupingStrategy implements RegionGroupingStrategy { + @Override + public void init(Configuration config, String providerId) { + } + + @Override + public String group(final byte[] identifier, final byte[] namespace) { + return Bytes.toString(identifier); + } + } + + @Override + protected WAL createRemoteWAL(RegionInfo region, FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException { + WALProvider provider = getWALProvider(region); + if (provider instanceof AbstractWALProvider) { + return ((AbstractWALProvider) provider).createRemoteWAL(region, remoteFs, remoteWALDir, + prefix, suffix); + } + throw new IOException( + provider.getClass().getSimpleName() + " does not support creating remote WAL"); + } + + @Override + protected void shutdown0() throws IOException { // save the last exception and rethrow IOException failure = null; for (WALProvider provider : cached.values()) { @@ -241,7 +294,7 @@ public void shutdown() throws IOException { } @Override - public void close() throws IOException { + protected void close0() throws IOException { // save the last exception and rethrow IOException failure = null; for (WALProvider provider : cached.values()) { @@ -260,19 +313,8 @@ public void close() throws IOException { } } - static class IdentityGroupingStrategy implements RegionGroupingStrategy { - @Override - public void init(Configuration config, String providerId) { - } - - @Override - public String group(final byte[] identifier, final byte[] namespace) { - return Bytes.toString(identifier); - } - } - @Override - public long getNumLogFiles() { + protected long getNumLogFiles0() { long numLogFiles = 0; for (WALProvider provider : cached.values()) { numLogFiles += provider.getNumLogFiles(); @@ -281,7 +323,7 @@ public long getNumLogFiles() { } @Override - public long getLogFileSize() { + protected long getLogFileSize0() { long logFileSize = 0; for (WALProvider provider : cached.values()) { logFileSize += provider.getLogFileSize(); @@ -289,13 +331,4 @@ public long getLogFileSize() { return logFileSize; } - @Override - public void addWALActionsListener(WALActionsListener listener) { - // Notice that there is an assumption that this method must be called before the getWAL above, - // so we can make sure there is no sub WALProvider yet, so we only add the listener to our - // listeners list without 
calling addWALActionListener for each WALProvider. Although it is no - // hurt to execute an extra loop to call addWALActionListener for each WALProvider, but if the - // extra code actually works, then we will have other big problems. So leave it as is. - listeners.add(listener); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 8f6278cf4394..2bdb1e41eb5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -42,6 +42,12 @@ @InterfaceStability.Evolving public interface WAL extends Closeable, WALFileLengthProvider { + /** + * Used to initialize the WAL. Usually this is for creating the first writer. + */ + default void init() throws IOException { + } + /** * Registers WALActionsListener */ @@ -224,6 +230,14 @@ default void sync(long txid, boolean forceSync) throws IOException { */ long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName); + /** + * Tell the WAL that when creating new writer you can skip creating the remote writer. + *
<p>
+ * Used by sync replication for switching states from ACTIVE, where the remote cluster is broken. + */ + default void skipRemoteWAL(boolean markerEditOnly) { + } + /** * Human readable identifying information about the state of this WAL. Implementors are encouraged * to include information appropriate for debugging. Consumers are advised not to rely on the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 63bef79fa455..4d3f1f1ec085 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -211,7 +211,7 @@ static WALProvider createProvider(Class clazz) throws IOE public WALFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider // for HMaster or HRegionServer which take system table only. See HBASE-19999 - this(conf, factoryId, null, true); + this(conf, factoryId, null); } /** @@ -220,30 +220,25 @@ public WALFactory(Configuration conf, String factoryId) throws IOException { * This is the constructor you should use when creating a WALFactory in normal code, to make sure * that the {@code factoryId} is the server name. We need this assumption in some places for * parsing the server name out from the wal file name. - * @param conf must not be null, will keep a reference to read params - * in later reader/writer instances. - * @param serverName use to generate the factoryId, which will be append at - * the first of the final file name - * @param abortable the server associated with this WAL file - * @param enableSyncReplicationWALProvider whether wrap the wal provider to a - * {@link SyncReplicationWALProvider} n + * @param conf must not be null, will keep a reference to read params in later reader/writer + * instances. + * @param serverName use to generate the factoryId, which will be append at the first of the final + * file name + * @param abortable the server associated with this WAL file */ - public WALFactory(Configuration conf, ServerName serverName, Abortable abortable, - boolean enableSyncReplicationWALProvider) throws IOException { - this(conf, serverName.toString(), abortable, enableSyncReplicationWALProvider); + public WALFactory(Configuration conf, ServerName serverName, Abortable abortable) + throws IOException { + this(conf, serverName.toString(), abortable); } /** - * @param conf must not be null, will keep a reference to read params - * in later reader/writer instances. - * @param factoryId a unique identifier for this factory. used i.e. by - * filesystem implementations to make a directory - * @param abortable the server associated with this WAL file - * @param enableSyncReplicationWALProvider whether wrap the wal provider to a - * {@link SyncReplicationWALProvider} + * @param conf must not be null, will keep a reference to read params in later reader/writer + * instances. + * @param factoryId a unique identifier for this factory. used i.e. 
by filesystem implementations + * to make a directory + * @param abortable the server associated with this WAL file */ - private WALFactory(Configuration conf, String factoryId, Abortable abortable, - boolean enableSyncReplicationWALProvider) throws IOException { + private WALFactory(Configuration conf, String factoryId, Abortable abortable) throws IOException { // until we've moved reader/writer construction down into providers, this initialization must // happen prior to provider initialization, in case they need to instantiate a reader/writer. timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); @@ -265,9 +260,6 @@ private WALFactory(Configuration conf, String factoryId, Abortable abortable, // end required early initialization if (conf.getBoolean(WAL_ENABLED, true)) { WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); - if (enableSyncReplicationWALProvider) { - provider = new SyncReplicationWALProvider(provider); - } provider.init(this, conf, null, this.abortable); provider.addWALActionsListener(new MetricsWAL()); this.provider = provider; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index 06e445607cb3..2c78f17db64e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; +import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener; +import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider; import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -128,4 +130,12 @@ default WALFileLengthProvider getWALFileLengthProvider() { return path -> getWALs().stream().map(w -> w.getLogFileSizeIfBeingWritten(path)) .filter(o -> o.isPresent()).findAny().orElse(OptionalLong.empty()); } + + // sync replication related + default PeerActionListener getPeerActionListener() { + return PeerActionListener.DUMMY; + } + + default void setSyncReplicationPeerInfoProvider(SyncReplicationPeerInfoProvider provider) { + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index 333ec4b78b1a..9a946992b770 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -144,8 +144,8 @@ protected void archive(Pair localLogsToArchive) { } @Override - protected Writer createWriterInstance(Path path) throws IOException { - final Writer w = super.createWriterInstance(path); + protected Writer createWriterInstance(FileSystem fs, Path path) throws IOException { + final Writer w = super.createWriterInstance(fs, path); return new Writer() { @Override public void close() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 03bbbbe47ae6..6a1c285bf8db 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -1210,8 +1210,8 @@ public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configu } @Override - protected Writer createWriterInstance(Path path) throws IOException { - final Writer w = super.createWriterInstance(path); + protected Writer createWriterInstance(FileSystem fs, Path path) throws IOException { + final Writer w = super.createWriterInstance(fs, path); return new Writer() { @Override public void close() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationForWriteException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationForWriteException.java index 517fb0b828a0..88a7a2a7e73e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationForWriteException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationForWriteException.java @@ -276,17 +276,7 @@ public SlowAsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String l Class channelClass, StreamSlowMonitor monitor) throws FailedLogCloseException, IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix, eventLoopGroup, channelClass, monitor); - - } - - public SlowAsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); - + suffix, null, null, eventLoopGroup, channelClass, monitor); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 2e7c97ef4de4..ca433a8ef717 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -397,12 +397,12 @@ public void testFailedToCreateWALIfParentRenamed() HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); long filenum = EnvironmentEdgeManager.currentTime(); Path path = wal.computeFilename(filenum); - wal.createWriterInstance(path); + wal.createWriterInstance(FS, path); Path parent = path.getParent(); path = wal.computeFilename(filenum + 1); Path newPath = new Path(parent.getParent(), parent.getName() + "-splitting"); FS.rename(parent, newPath); - wal.createWriterInstance(path); + wal.createWriterInstance(FS, path); fail("It should fail to create the new WAL"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java index 00f3947dfb49..dc075ff8f966 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; 
import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.LogRoller; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -101,8 +102,9 @@ public static void tearDownAfterClass() throws Exception { protected AbstractFSWAL newWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix) throws IOException { - AsyncFSWAL wal = new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners, - failIfWALExists, prefix, suffix, GROUP, CHANNEL_CLASS); + AsyncFSWAL wal = new AsyncFSWAL(fs, null, rootDir, logDir, archiveDir, conf, listeners, + failIfWALExists, prefix, suffix, null, null, GROUP, CHANNEL_CLASS, + StreamSlowMonitor.create(conf, "monitor")); wal.init(); return wal; } @@ -112,8 +114,9 @@ protected AbstractFSWAL newSlowWAL(FileSystem fs, Path rootDir, String logDir String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix, final Runnable action) throws IOException { - AsyncFSWAL wal = new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners, - failIfWALExists, prefix, suffix, GROUP, CHANNEL_CLASS) { + AsyncFSWAL wal = new AsyncFSWAL(fs, null, rootDir, logDir, archiveDir, conf, listeners, + failIfWALExists, prefix, suffix, null, null, GROUP, CHANNEL_CLASS, + StreamSlowMonitor.create(conf, "monitor")) { @Override protected void atHeadOfRingBufferEventHandlerAppend() { @@ -141,12 +144,13 @@ public void testBrokenWriter() throws Exception { String testName = currentTest.getMethodName(); AtomicInteger failedCount = new AtomicInteger(0); try (LogRoller roller = new LogRoller(services); - AsyncFSWAL wal = new AsyncFSWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), - testName, CONF, null, true, null, null, GROUP, CHANNEL_CLASS) { + AsyncFSWAL wal = new AsyncFSWAL(FS, null, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), + testName, CONF, null, true, null, null, null, null, GROUP, CHANNEL_CLASS, + StreamSlowMonitor.create(CONF, "monitorForSuffix")) { @Override - protected AsyncWriter createWriterInstance(Path path) throws IOException { - AsyncWriter writer = super.createWriterInstance(path); + protected AsyncWriter createWriterInstance(FileSystem fs, Path path) throws IOException { + AsyncWriter writer = super.createWriterInstance(fs, path); return new AsyncWriter() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java index 8402617c44b5..1d1ffcdac3f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; @@ -90,13 +91,14 @@ class CustomAsyncFSWAL 
extends AsyncFSWAL { public CustomAsyncFSWAL(FileSystem fs, Path rootDir, String logDir, Configuration conf, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null, - eventLoopGroup, channelClass); + super(fs, null, rootDir, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, + null, null, null, eventLoopGroup, channelClass, + StreamSlowMonitor.create(conf, "monitorForSuffix")); } @Override - protected AsyncWriter createWriterInstance(Path path) throws IOException { - AsyncWriter writer = super.createWriterInstance(path); + protected AsyncWriter createWriterInstance(FileSystem fs, Path path) throws IOException { + AsyncWriter writer = super.createWriterInstance(fs, path); return new AsyncWriter() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java index e0271bd16d2e..97488cdfc098 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -162,8 +163,9 @@ public void logRollRequested(RollRequestReason reason) { } }; - WAL = new AsyncFSWAL(UTIL.getTestFileSystem(), rootDir, "log", "oldlog", conf, - Arrays.asList(listener), true, null, null, EVENT_LOOP_GROUP, CHANNEL_CLASS); + WAL = new AsyncFSWAL(UTIL.getTestFileSystem(), null, rootDir, "log", "oldlog", conf, + Arrays.asList(listener), true, null, null, null, null, EVENT_LOOP_GROUP, CHANNEL_CLASS, + StreamSlowMonitor.create(conf, "monitor")); WAL.init(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java index cf752cefffb9..7427c4d83637 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Threads; @@ -69,8 +70,9 @@ public static void tearDownAfterClass() throws Exception { @Override protected WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws IOException { - AsyncFSWAL wal = new AsyncFSWAL(FileSystem.get(c), hbaseRootDir, logName, - HConstants.HREGION_OLDLOGDIR_NAME, c, null, true, null, null, GROUP, CHANNEL_CLASS); + AsyncFSWAL wal = new AsyncFSWAL(FileSystem.get(c), null, hbaseRootDir, logName, + HConstants.HREGION_OLDLOGDIR_NAME, c, null, 
true, null, null, null, null, GROUP, + CHANNEL_CLASS, StreamSlowMonitor.create(c, "monitor")); wal.init(); return wal; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java index 777b7b3f3d61..926092663d40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java @@ -70,8 +70,8 @@ public CustomFSHLog(FileSystem fs, Path root, String logDir, Configuration conf) } @Override - protected Writer createWriterInstance(Path path) throws IOException { - Writer writer = super.createWriterInstance(path); + protected Writer createWriterInstance(FileSystem fs, Path path) throws IOException { + Writer writer = super.createWriterInstance(fs, path); return new Writer() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALSyncTimeoutException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALSyncTimeoutException.java index 939c56a54472..a1b9b9d54a10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALSyncTimeoutException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALSyncTimeoutException.java @@ -166,17 +166,7 @@ public SlowAsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String l Class channelClass, StreamSlowMonitor monitor) throws FailedLogCloseException, IOException { super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, - suffix, eventLoopGroup, channelClass, monitor); - - } - - public SlowAsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); - + suffix, null, null, eventLoopGroup, channelClass, monitor); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/BrokenRemoteAsyncFSWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/BrokenRemoteAsyncFSWALProvider.java new file mode 100644 index 000000000000..5e493233b4c4 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/BrokenRemoteAsyncFSWALProvider.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; +import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; +import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALProvider; + +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; +import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + +public class BrokenRemoteAsyncFSWALProvider extends AsyncFSWALProvider { + + static class BrokenRemoteAsyncFSWAL extends AsyncFSWAL { + + private final class MyCombinedAsyncWriter implements WALProvider.AsyncWriter { + + private final WALProvider.AsyncWriter localWriter; + + private final WALProvider.AsyncWriter remoteWriter; + + // remoteWriter on the first + public MyCombinedAsyncWriter(WALProvider.AsyncWriter localWriter, + WALProvider.AsyncWriter remoteWriter) { + this.localWriter = localWriter; + this.remoteWriter = remoteWriter; + } + + @Override + public long getLength() { + return localWriter.getLength(); + } + + @Override + public long getSyncedLength() { + return this.localWriter.getSyncedLength(); + } + + @Override + public void close() throws IOException { + Closeables.close(localWriter, true); + Closeables.close(remoteWriter, true); + } + + @Override + public CompletableFuture sync(boolean forceSync) { + CompletableFuture localFuture; + CompletableFuture remoteFuture; + + if (!localBroken) { + localFuture = localWriter.sync(forceSync); + } else { + localFuture = new CompletableFuture<>(); + localFuture.completeExceptionally(new IOException("Inject error")); + } + if (!remoteBroken) { + remoteFuture = remoteWriter.sync(forceSync); + } else { + remoteFuture = new CompletableFuture<>(); + remoteFuture.completeExceptionally(new IOException("Inject error")); + } + return CompletableFuture.allOf(localFuture, remoteFuture).thenApply(v -> { + return localFuture.getNow(0L); + }); + } + + @Override + public void append(WAL.Entry entry) { + if (!localBroken) { + localWriter.append(entry); + } + if (!remoteBroken) { + remoteWriter.append(entry); + } + } + } + + private volatile boolean localBroken; + + private volatile boolean remoteBroken; + + private CountDownLatch arrive; + + private CountDownLatch resume; + + public void setLocalBroken() { + this.localBroken = true; + } + + public void setRemoteBroken() { + this.remoteBroken = true; + } + + public void suspendLogRoll() { + arrive = new CountDownLatch(1); + resume = new CountDownLatch(1); + } + + public void waitUntilArrive() throws InterruptedException { + arrive.await(); + } + + public void resumeLogRoll() { + resume.countDown(); + } + + public BrokenRemoteAsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, FileSystem remoteFs, 
Path remoteWALDir, + EventLoopGroup eventLoopGroup, Class channelClass, + StreamSlowMonitor monitor) throws FailedLogCloseException, IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix, remoteFs, remoteWALDir, eventLoopGroup, channelClass, monitor); + } + + @Override + protected WALProvider.AsyncWriter createCombinedWriter(WALProvider.AsyncWriter localWriter, + WALProvider.AsyncWriter remoteWriter) { + return new MyCombinedAsyncWriter(localWriter, remoteWriter); + } + + @Override + protected WALProvider.AsyncWriter createWriterInstance(FileSystem fs, Path path) + throws IOException { + if (arrive != null) { + arrive.countDown(); + try { + resume.await(); + } catch (InterruptedException e) { + } + } + if (localBroken || remoteBroken) { + throw new IOException("WAL broken"); + } + return super.createWriterInstance(fs, path); + } + } + + @Override + protected AsyncFSWAL createWAL() throws IOException { + return new BrokenRemoteAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.getFactoryId()), + getWALArchiveDirectoryName(conf, factory.getFactoryId()), conf, listeners, true, logPrefix, + META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, null, null, + eventLoopGroup, channelClass, + factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); + + } + + @Override + protected WAL createRemoteWAL(RegionInfo region, FileSystem remoteFs, Path remoteWALDir, + String prefix, String suffix) throws IOException { + return new BrokenRemoteAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.getFactoryId()), + getWALArchiveDirectoryName(conf, factory.getFactoryId()), conf, listeners, true, prefix, + suffix, remoteFs, remoteWALDir, eventLoopGroup, channelClass, + factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java deleted file mode 100644 index d2043b469a61..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; -import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; -import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; -import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; - -import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -import org.apache.hbase.thirdparty.io.netty.channel.Channel; -import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; - -class DualAsyncFSWALForTest extends DualAsyncFSWAL { - - private boolean localBroken; - - private boolean remoteBroken; - - private CountDownLatch arrive; - - private CountDownLatch resume; - - private final class MyCombinedAsyncWriter implements AsyncWriter { - - private final AsyncWriter localWriter; - - private final AsyncWriter remoteWriter; - - public MyCombinedAsyncWriter(AsyncWriter localWriter, AsyncWriter remoteWriter) { - this.localWriter = localWriter; - this.remoteWriter = remoteWriter; - } - - @Override - public long getLength() { - return localWriter.getLength(); - } - - @Override - public long getSyncedLength() { - return this.localWriter.getSyncedLength(); - } - - @Override - public void close() throws IOException { - Closeables.close(localWriter, true); - Closeables.close(remoteWriter, true); - } - - @Override - public CompletableFuture sync(boolean forceSync) { - CompletableFuture localFuture; - CompletableFuture remoteFuture; - if (!localBroken) { - localFuture = localWriter.sync(forceSync); - } else { - localFuture = new CompletableFuture<>(); - localFuture.completeExceptionally(new IOException("Inject error")); - } - if (!remoteBroken) { - remoteFuture = remoteWriter.sync(forceSync); - } else { - remoteFuture = new CompletableFuture<>(); - remoteFuture.completeExceptionally(new IOException("Inject error")); - } - return CompletableFuture.allOf(localFuture, remoteFuture).thenApply(v -> { - return localFuture.getNow(0L); - }); - } - - @Override - public void append(Entry entry) { - if (!localBroken) { - localWriter.append(entry); - } - if (!remoteBroken) { - remoteWriter.append(entry); - } - } - } - - public DualAsyncFSWALForTest(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteWALDir, - String logDir, String archiveDir, Configuration conf, List listeners, - boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { - super(fs, remoteFs, rootDir, remoteWALDir, logDir, archiveDir, conf, listeners, failIfWALExists, - prefix, suffix, eventLoopGroup, channelClass); - } - - @Override - protected AsyncWriter createCombinedAsyncWriter(AsyncWriter localWriter, - AsyncWriter remoteWriter) { - return new MyCombinedAsyncWriter(localWriter, remoteWriter); - } - - @Override - protected AsyncWriter createWriterInstance(Path path) throws IOException { - if (arrive != null) { - arrive.countDown(); - try { - resume.await(); - } catch (InterruptedException e) { - } - } - if (localBroken || remoteBroken) { - throw new IOException("WAL broken"); - } - return super.createWriterInstance(path); - } - - public void setLocalBroken() { - this.localBroken = true; - } - - public void setRemoteBroken() { - 
this.remoteBroken = true; - } - - public void suspendLogRoll() { - arrive = new CountDownLatch(1); - resume = new CountDownLatch(1); - } - - public void waitUntilArrive() throws InterruptedException { - arrive.await(); - } - - public void resumeLogRoll() { - resume.countDown(); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationActiveTestBase.java similarity index 95% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationActiveTestBase.java index 4c51a0a19458..ce6b7f65bef9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationActiveTestBase.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; @@ -43,16 +42,11 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALStreamReader; import org.junit.Assert; -import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @Category({ ReplicationTests.class, LargeTests.class }) -public class TestSyncReplicationActive extends SyncReplicationTestBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncReplicationActive.class); +public class SyncReplicationActiveTestBase extends SyncReplicationTestBase { @Test public void testActive() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationStandbyTestBase.java similarity index 84% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationStandbyTestBase.java index e13f09b19f79..a11f65cfcd24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationStandbyTestBase.java @@ -17,18 +17,19 @@ */ package org.apache.hadoop.hbase.replication; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -38,20 +39,11 @@ import 
org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; -import org.junit.ClassRule; import org.junit.Test; -import org.junit.experimental.categories.Category; -@Category({ ReplicationTests.class, LargeTests.class }) -public class TestSyncReplicationStandBy extends SyncReplicationTestBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncReplicationStandBy.class); +public class SyncReplicationStandbyTestBase extends SyncReplicationTestBase { @FunctionalInterface private interface TableAction { @@ -60,13 +52,10 @@ private interface TableAction { } private void assertDisallow(Table table, TableAction action) throws IOException { - try { - action.call(table); - fail("Should not allow the action"); - } catch (DoNotRetryIOException | RetriesExhaustedException e) { - // expected - assertThat(e.getMessage(), containsString("STANDBY")); - } + Exception error = assertThrows(Exception.class, () -> action.call(table)); + assertThat(error, either(instanceOf(DoNotRetryIOException.class)) + .or(instanceOf(RetriesExhaustedException.class))); + assertThat(error.getMessage(), containsString("STANDBY")); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveAsyncFSWAL.java new file mode 100644 index 000000000000..a28755df75de --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveAsyncFSWAL.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestSyncReplicationActiveAsyncFSWAL extends SyncReplicationActiveTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSyncReplicationActiveAsyncFSWAL.class); + + @BeforeClass + public static void setUp() throws Exception { + UTIL1.getConfiguration().set(WALFactory.WAL_PROVIDER, "asyncfs"); + UTIL2.getConfiguration().set(WALFactory.WAL_PROVIDER, "asyncfs"); + SyncReplicationTestBase.setUp(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveFSHLog.java new file mode 100644 index 000000000000..300e3fe38123 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActiveFSHLog.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestSyncReplicationActiveFSHLog extends SyncReplicationActiveTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSyncReplicationActiveFSHLog.class); + + @BeforeClass + public static void setUp() throws Exception { + UTIL1.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + UTIL2.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + SyncReplicationTestBase.setUp(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java index e3f2ae9c5c5c..99bee53f0de2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.Assert.assertThrows; import java.util.concurrent.ExecutionException; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -31,11 +31,11 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -55,10 +55,10 @@ public class TestSyncReplicationMoreLogsInLocalCopyToRemote extends SyncReplicat @BeforeClass public static void setUp() throws Exception { - UTIL1.getConfiguration().setClass(SyncReplicationWALProvider.DUAL_WAL_IMPL, - DualAsyncFSWALForTest.class, DualAsyncFSWAL.class); - UTIL2.getConfiguration().setClass(SyncReplicationWALProvider.DUAL_WAL_IMPL, - DualAsyncFSWALForTest.class, DualAsyncFSWAL.class); + UTIL1.getConfiguration().setClass(WALFactory.WAL_PROVIDER, BrokenRemoteAsyncFSWALProvider.class, + WALProvider.class); + UTIL2.getConfiguration().setClass(WALFactory.WAL_PROVIDER, BrokenRemoteAsyncFSWALProvider.class, + WALProvider.class); SyncReplicationTestBase.setUp(); } @@ -70,19 +70,18 @@ public void testSplitLog() throws Exception { UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.ACTIVE); HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME); - DualAsyncFSWALForTest wal = - (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); + + BrokenRemoteAsyncFSWALProvider.BrokenRemoteAsyncFSWAL wal = + 
(BrokenRemoteAsyncFSWALProvider.BrokenRemoteAsyncFSWAL) rs.getWalFactory() + .getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); wal.setRemoteBroken(); + try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) { AsyncTable table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build(); - try { - table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get(); - fail("Should fail since the rs will crash and we will not retry"); - } catch (ExecutionException e) { - // expected - LOG.info("Expected error:", e); - } + ExecutionException error = assertThrows(ExecutionException.class, + () -> table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get()); + LOG.info("Expected error:", error); } UTIL1.waitFor(60000, new ExplainingPredicate() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java index eff50a4433e5..fd6e138dd97d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.hbase.replication; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -38,11 +40,11 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -62,10 +64,10 @@ public class TestSyncReplicationMoreLogsInLocalGiveUpSplitting extends SyncRepli @BeforeClass public static void setUp() throws Exception { - UTIL1.getConfiguration().setClass(SyncReplicationWALProvider.DUAL_WAL_IMPL, - DualAsyncFSWALForTest.class, DualAsyncFSWAL.class); - UTIL2.getConfiguration().setClass(SyncReplicationWALProvider.DUAL_WAL_IMPL, - DualAsyncFSWALForTest.class, DualAsyncFSWAL.class); + UTIL1.getConfiguration().setClass(WALFactory.WAL_PROVIDER, BrokenRemoteAsyncFSWALProvider.class, + WALProvider.class); + UTIL2.getConfiguration().setClass(WALFactory.WAL_PROVIDER, BrokenRemoteAsyncFSWALProvider.class, + WALProvider.class); SyncReplicationTestBase.setUp(); } @@ -81,21 +83,18 @@ public void testSplitLog() throws Exception { table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))); } HRegionServer rs = 
UTIL1.getRSForFirstRegionInTable(TABLE_NAME); - DualAsyncFSWALForTest wal = - (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); + BrokenRemoteAsyncFSWALProvider.BrokenRemoteAsyncFSWAL wal = + (BrokenRemoteAsyncFSWALProvider.BrokenRemoteAsyncFSWAL) rs.getWalFactory() + .getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); wal.setRemoteBroken(); wal.suspendLogRoll(); try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) { AsyncTable table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1) .setWriteRpcTimeout(5, TimeUnit.SECONDS).build(); - try { - table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))).get(); - fail("Should fail since the rs will hang and we will get a rpc timeout"); - } catch (ExecutionException e) { - // expected - LOG.info("Expected error:", e); - } + ExecutionException error = assertThrows(ExecutionException.class, + () -> table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))).get()); + LOG.info("Expected error:", error); } wal.waitUntilArrive(); UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, @@ -111,12 +110,11 @@ public void testSplitLog() throws Exception { // make sure that the region is online. We can not use waitTableAvailable since the table in // stand by state can not be read from client. try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) { - try { - table.exists(new Get(Bytes.toBytes(0))); - } catch (DoNotRetryIOException | RetriesExhaustedException e) { - // expected - assertThat(e.getMessage(), containsString("STANDBY")); - } + Exception error = + assertThrows(Exception.class, () -> table.exists(new Get(Bytes.toBytes(0)))); + assertThat(error, either(instanceOf(DoNotRetryIOException.class)) + .or(instanceOf(RetriesExhaustedException.class))); + assertThat(error.getMessage(), containsString("STANDBY")); } HRegion region = UTIL1.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0); // we give up splitting the whole wal file so this record will also be gone. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyAsyncFSWAL.java new file mode 100644 index 000000000000..52a2f5a8fb27 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyAsyncFSWAL.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestSyncReplicationStandbyAsyncFSWAL extends SyncReplicationStandbyTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSyncReplicationStandbyAsyncFSWAL.class); + + @BeforeClass + public static void setUp() throws Exception { + UTIL1.getConfiguration().set(WALFactory.WAL_PROVIDER, "asyncfs"); + UTIL2.getConfiguration().set(WALFactory.WAL_PROVIDER, "asyncfs"); + SyncReplicationTestBase.setUp(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyFSHLog.java new file mode 100644 index 000000000000..16b1088bcfba --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyFSHLog.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestSyncReplicationStandbyFSHLog extends SyncReplicationStandbyTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSyncReplicationStandbyFSHLog.class); + + @BeforeClass + public static void setUp() throws Exception { + UTIL1.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + UTIL2.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + SyncReplicationTestBase.setUp(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 1bb9a3e2949b..ffeb22d01bcc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -182,7 +182,7 @@ public void setUp() throws Exception { replication = new Replication(); replication.initialize(server, FS, new Path(logDir, sn.toString()), oldLogDir, - new WALFactory(CONF, server.getServerName(), null, false)); + new WALFactory(CONF, server.getServerName(), null)); manager = replication.getReplicationManager(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java index aa5a40f44cea..5a0578a35d27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.replication.SyncReplicationTestBase; @@ -58,8 +58,8 @@ public void testShipperQuitWhenDA() throws Exception { writeAndVerifyReplication(UTIL1, UTIL2, 0, 100); HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME); - DualAsyncFSWAL wal = - (DualAsyncFSWAL) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); + AbstractFSWAL wal = + (AbstractFSWAL) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()); String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getCurrentFileName().getName()); ReplicationSourceShipper shipper = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index 64bc1415657a..b173a60c95fb 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -197,7 +197,7 @@ public IOTestWAL(final FileSystem fs, final Path rootDir, final String logDir, // creatWriterInstance is where the new pipeline is set up for doing file rolls // if we are skipping it, just keep returning the same writer. @Override - protected Writer createWriterInstance(final Path path) throws IOException { + protected Writer createWriterInstance(FileSystem fs, final Path path) throws IOException { // we get called from the FSHLog constructor (!); always roll in this case since // we don't know yet if we're supposed to generally roll and // we need an initial file in the case of doing appends but no rolls. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java index 5d80bc819cdf..0d44ead064dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -55,8 +56,9 @@ public PauseWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); + super(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, + null, null, eventLoopGroup, channelClass, + StreamSlowMonitor.create(conf, "monitorForSuffix")); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java deleted file mode 100644 index 76d103ace958..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.wal; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; - -import java.io.IOException; -import java.util.Optional; -import java.util.function.BiPredicate; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; -import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; -import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogTestHelper; -import org.apache.hadoop.hbase.regionserver.wal.ProtobufWALStreamReader; -import org.apache.hadoop.hbase.replication.SyncReplicationState; -import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ RegionServerTests.class, MediumTests.class }) -public class TestSyncReplicationWALProvider { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class); - - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - private static String PEER_ID = "1"; - - private static String REMOTE_WAL_DIR = "/RemoteWAL"; - - private static TableName TABLE = TableName.valueOf("table"); - - private static TableName TABLE_NO_REP = TableName.valueOf("table-no-rep"); - - private static RegionInfo REGION = RegionInfoBuilder.newBuilder(TABLE).build(); - - private static RegionInfo REGION_NO_REP = RegionInfoBuilder.newBuilder(TABLE_NO_REP).build(); - - private static WALFactory FACTORY; - - public static final class InfoProvider implements SyncReplicationPeerInfoProvider { - - @Override - public Optional> getPeerIdAndRemoteWALDir(TableName table) { - if (table != null && table.equals(TABLE)) { - return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR)); - } else { - return Optional.empty(); - } - } - - @Override - public boolean checkState(TableName table, - BiPredicate checker) { - return false; - } - } - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - UTIL.startMiniDFSCluster(3); - FACTORY = new WALFactory(UTIL.getConfiguration(), "test"); - ((SyncReplicationWALProvider) FACTORY.getWALProvider()).setPeerInfoProvider(new InfoProvider()); - UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID)); - } - - @AfterClass - public static void tearDownAfterClass() throws IOException { - FACTORY.close(); - UTIL.shutdownMiniDFSCluster(); - } - - private void testReadWrite(DualAsyncFSWAL wal) throws Exception { - int recordCount = 100; - int columnCount = 10; - byte[] row = Bytes.toBytes("testRow"); - long timestamp = EnvironmentEdgeManager.currentTime(); - 
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); - ProtobufLogTestHelper.doWrite(wal, REGION, TABLE, columnCount, recordCount, row, timestamp, - mvcc); - Path localFile = wal.getCurrentFileName(); - Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName()); - try (ProtobufWALStreamReader reader = - (ProtobufWALStreamReader) FACTORY.createStreamReader(UTIL.getTestFileSystem(), localFile)) { - ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row, - timestamp); - } - try (ProtobufWALStreamReader reader = - (ProtobufWALStreamReader) FACTORY.createStreamReader(UTIL.getTestFileSystem(), remoteFile)) { - ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row, - timestamp); - } - wal.rollWriter(); - DistributedFileSystem dfs = (DistributedFileSystem) UTIL.getDFSCluster().getFileSystem(); - UTIL.waitFor(5000, new ExplainingPredicate() { - - @Override - public boolean evaluate() throws Exception { - return dfs.isFileClosed(localFile) && dfs.isFileClosed(remoteFile); - } - - @Override - public String explainFailure() throws Exception { - StringBuilder sb = new StringBuilder(); - if (!dfs.isFileClosed(localFile)) { - sb.append(localFile + " has not been closed yet."); - } - if (!dfs.isFileClosed(remoteFile)) { - sb.append(remoteFile + " has not been closed yet."); - } - return sb.toString(); - } - }); - try (ProtobufWALStreamReader reader = - (ProtobufWALStreamReader) FACTORY.createStreamReader(UTIL.getTestFileSystem(), localFile)) { - ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row, - timestamp); - } - try (ProtobufWALStreamReader reader = - (ProtobufWALStreamReader) FACTORY.createStreamReader(UTIL.getTestFileSystem(), remoteFile)) { - ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row, - timestamp); - } - } - - @Test - public void test() throws Exception { - WAL walNoRep = FACTORY.getWAL(REGION_NO_REP); - assertThat(walNoRep, not(instanceOf(DualAsyncFSWAL.class))); - DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION); - assertEquals(2, FACTORY.getWALs().size()); - testReadWrite(wal); - SyncReplicationWALProvider walProvider = (SyncReplicationWALProvider) FACTORY.getWALProvider(); - walProvider.peerSyncReplicationStateChange(PEER_ID, SyncReplicationState.ACTIVE, - SyncReplicationState.DOWNGRADE_ACTIVE, 1); - assertEquals(1, FACTORY.getWALs().size()); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index 244c37bfe847..2c994f091f32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -623,16 +623,11 @@ public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit log @Test public void testWALProviders() throws IOException { Configuration conf = new Configuration(); - // if providers are not set but enable SyncReplicationWALProvider by default for master node - // with not only system tables WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); - assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); - WALProvider wrappedWALProvider = - ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); - assertEquals(wrappedWALProvider.getClass(), 
walFactory.getMetaProvider().getClass()); + assertEquals(walFactory.getWALProvider().getClass(), walFactory.getMetaProvider().getClass()); // if providers are not set and do not enable SyncReplicationWALProvider - walFactory = new WALFactory(conf, this.currentServername, null, false); + walFactory = new WALFactory(conf, this.currentServername, null); assertEquals(walFactory.getWALProvider().getClass(), walFactory.getMetaProvider().getClass()); } @@ -641,12 +636,8 @@ public void testOnlySetWALProvider() throws IOException { Configuration conf = new Configuration(); conf.set(WAL_PROVIDER, WALFactory.Providers.multiwal.name()); WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); - WALProvider wrappedWALProvider = - ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); - - assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); // class of WALProvider and metaWALProvider are the same when metaWALProvider is not set - assertEquals(WALFactory.Providers.multiwal.clazz, wrappedWALProvider.getClass()); + assertEquals(WALFactory.Providers.multiwal.clazz, walFactory.getWALProvider().getClass()); assertEquals(WALFactory.Providers.multiwal.clazz, walFactory.getMetaProvider().getClass()); } @@ -655,11 +646,8 @@ public void testOnlySetMetaWALProvider() throws IOException { Configuration conf = new Configuration(); conf.set(META_WAL_PROVIDER, WALFactory.Providers.asyncfs.name()); WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); - WALProvider wrappedWALProvider = - ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); - - assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); - assertEquals(WALFactory.Providers.defaultProvider.clazz, wrappedWALProvider.getClass()); + assertEquals(WALFactory.Providers.defaultProvider.clazz, + walFactory.getWALProvider().getClass()); assertEquals(WALFactory.Providers.asyncfs.clazz, walFactory.getMetaProvider().getClass()); } From 076b5fb27fe3c64e8522755abf306e1c8287d934 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 30 Sep 2023 10:39:26 -0800 Subject: [PATCH 095/514] HBASE-28081 Snapshot working dir does not retain ACLs after snapshot commit phase (#5437) Signed-off-by: Duo Zhang Signed-off-by: Andrew Purtell Signed-off-by: Aman Poonia --- .../master/snapshot/SnapshotManager.java | 40 +++++++++++++++++++ .../access/SnapshotScannerHDFSAclHelper.java | 14 ++++--- .../TestSnapshotScannerHDFSAclController.java | 13 +++--- 3 files changed, 55 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 3c421dd8bd01..ed7ef583ec52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -38,10 +38,13 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import 
org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -564,6 +567,7 @@ public synchronized void prepareWorkingDirectory(SnapshotDescription snapshot) "Couldn't create working directory (" + workingDir + ") for snapshot", ProtobufUtil.createSnapshotDesc(snapshot)); } + updateWorkingDirAclsIfRequired(workingDir, workingDirFS); } catch (HBaseSnapshotException e) { throw e; } catch (IOException e) { @@ -573,6 +577,42 @@ public synchronized void prepareWorkingDirectory(SnapshotDescription snapshot) } } + /** + * If the parent dir of the snapshot working dir (e.g. /hbase/.hbase-snapshot) has non-empty ACLs, + * use them for the current working dir (e.g. /hbase/.hbase-snapshot/.tmp/{snapshot-name}) so that + * regardless of whether the snapshot commit phase performs atomic rename or non-atomic copy of + * the working dir to new snapshot dir, the ACLs are retained. + * @param workingDir working dir to build the snapshot. + * @param workingDirFS working dir file system. + * @throws IOException If ACL read/modify operation fails. + */ + private static void updateWorkingDirAclsIfRequired(Path workingDir, FileSystem workingDirFS) + throws IOException { + if ( + !workingDirFS.hasPathCapability(workingDir, CommonPathCapabilities.FS_ACLS) + || workingDir.getParent() == null || workingDir.getParent().getParent() == null + ) { + return; + } + AclStatus snapshotWorkingParentDirStatus; + try { + snapshotWorkingParentDirStatus = + workingDirFS.getAclStatus(workingDir.getParent().getParent()); + } catch (IOException e) { + LOG.warn("Unable to retrieve ACL status for path: {}, current working dir path: {}", + workingDir.getParent().getParent(), workingDir, e); + return; + } + List snapshotWorkingParentDirAclStatusEntries = + snapshotWorkingParentDirStatus.getEntries(); + if ( + snapshotWorkingParentDirAclStatusEntries != null + && snapshotWorkingParentDirAclStatusEntries.size() > 0 + ) { + workingDirFS.modifyAclEntries(workingDir, snapshotWorkingParentDirAclStatusEntries); + } + } + /** * Take a snapshot of a disabled table. * @param snapshot description of the snapshot to take. Modified to be {@link Type#DISABLED}. 
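
The updateWorkingDirAclsIfRequired() change above boils down to one idea: right after the snapshot working directory is created, copy the ACL entries of its grandparent (the snapshot root, e.g. /hbase/.hbase-snapshot) onto it, so that whichever commit strategy runs later (atomic rename or non-atomic copy) the finished snapshot directory keeps the same permissions. Below is a minimal, self-contained sketch of that idea, assuming only a generic Hadoop FileSystem; the class and method names are hypothetical and are not part of this patch:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.CommonPathCapabilities;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;

    /** Hypothetical helper, shown only to illustrate the ACL-inheritance step. */
    public final class InheritDirAcls {

      private InheritDirAcls() {
      }

      /**
       * Copy the ACL entries of {@code ancestor} onto the freshly created {@code child}
       * directory, so that a later rename or copy of {@code child} keeps the same permissions.
       */
      public static void inheritAcls(FileSystem fs, Path ancestor, Path child) throws IOException {
        // File systems that do not expose ACLs (or have them disabled) are skipped entirely.
        if (!fs.hasPathCapability(child, CommonPathCapabilities.FS_ACLS)) {
          return;
        }
        List<AclEntry> entries = fs.getAclStatus(ancestor).getEntries();
        if (entries != null && !entries.isEmpty()) {
          // modifyAclEntries merges the ancestor's entries into the child's existing ACL
          // instead of replacing it, which is the behavior the patch relies on.
          fs.modifyAclEntries(child, entries);
        }
      }
    }

Called right after the working directory is successfully created, with the snapshot root directory as the ancestor, this reproduces the behavior the patch adds to prepareWorkingDirectory().
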
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java index 41f61a6efa33..0fd41e4748df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java @@ -152,11 +152,12 @@ public boolean grantAcl(UserPermission userPermission, Set skipNamespace long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, skipTables); - LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission, - EnvironmentEdgeManager.currentTime() - start); + LOG.info("Set HDFS acl when grant {}, skipNamespaces: {}, skipTables: {}, cost {} ms", + userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { - LOG.error("Set HDFS acl error when grant: {}", userPermission, e); + LOG.error("Set HDFS acl error when grant: {}, skipNamespaces: {}, skipTables: {}", + userPermission, skipNamespaces, skipTables, e); return false; } } @@ -174,11 +175,12 @@ public boolean revokeAcl(UserPermission userPermission, Set skipNamespac long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces, skipTables); - LOG.info("Set HDFS acl when revoke {}, cost {} ms", userPermission, - EnvironmentEdgeManager.currentTime() - start); + LOG.info("Set HDFS acl when revoke {}, skipNamespaces: {}, skipTables: {}, cost {} ms", + userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { - LOG.error("Set HDFS acl error when revoke: {}", userPermission, e); + LOG.error("Set HDFS acl error when revoke: {}, skipNamespaces: {}, skipTables: {}", + userPermission, skipNamespaces, skipTables, e); return false; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java index 99d5a89ac2c9..d79e3f308104 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java @@ -158,7 +158,6 @@ public void testGrantGlobal1() throws Exception { TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table); snapshotAndWait(snapshot1, table); - snapshotAndWait(snapshot2, table); // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); @@ -175,6 +174,8 @@ public void testGrantGlobal1() throws Exception { // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + // take a snapshot and ACLs are inherited automatically + snapshotAndWait(snapshot2, table); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); deleteTable(table); @@ -196,10 +197,10 @@ public void testGrantGlobal2() throws Exception { // create table in namespace1 and snapshot 
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); snapshotAndWait(snapshot1, table1); - // grant G(W) - SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); admin.grant(new UserPermission(grantUserName, Permission.newBuilder(namespace1).withActions(READ).build()), false); + // grant G(W) + SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); // create table in namespace2 and snapshot TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); snapshotAndWait(snapshot2, table2); @@ -230,11 +231,11 @@ public void testGrantGlobal3() throws Exception { // grant table1(R) TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); snapshotAndWait(snapshot1, table1); - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); - snapshotAndWait(snapshot2, table2); + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); // grant G(W) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); + snapshotAndWait(snapshot2, table2); // check scan snapshot TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); From 3bf3b963222c450d12d90e1c45d20a4d9a4537e8 Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Tue, 3 Oct 2023 09:33:55 +0200 Subject: [PATCH 096/514] HBASE-28126 Update MaxNumberOfRegionsCount in TestSimpleRegionNormalizer (#5446) Signed-off-by: Wellington Chevreuil --- .../hbase/master/normalizer/TestSimpleRegionNormalizer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 902205c74636..9c593035fee2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -533,7 +533,7 @@ public void testHonorsMergeRequestMaxNumberOfRegionsCountDefault() { final List regionInfos = createRegionInfos(tableName, 3); final Map regionSizes = createRegionSizesMap(regionInfos, 0, 0, 0); setupMocksForNormalizer(regionSizes, regionInfos); - assertEquals(50, normalizer.getMergeRequestMaxNumberOfRegionsCount()); + assertEquals(100, normalizer.getMergeRequestMaxNumberOfRegionsCount()); List plans = normalizer.computePlansForTable(tableDescriptor); assertThat(plans, contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) .addTarget(regionInfos.get(1), 0).addTarget(regionInfos.get(2), 0).build())); From e2af8f4f14debc50bff6ef0181bd07575ba3b8de Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 5 Oct 2023 08:25:03 -0400 Subject: [PATCH 097/514] HBASE-28128 Reject requests at RPC layer when RegionServer is aborting (#5447) Signed-off-by: Nick Dimiduk Reviewed-by: Duo Zhang --- .../hadoop/hbase/ipc/ServerRpcConnection.java | 40 ++- .../TestRegionServerRejectDuringAbort.java | 253 ++++++++++++++++++ 2 files changed, 281 insertions(+), 12 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 
e0f69e4b84c0..695f1e7050c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; import org.apache.hadoop.hbase.security.SaslStatus; @@ -548,6 +549,19 @@ protected void processRequest(ByteBuff buf) throws IOException, InterruptedExcep Span span = TraceUtil.createRemoteSpan("RpcServer.process", traceCtx); try (Scope ignored = span.makeCurrent()) { int id = header.getCallId(); + // HBASE-28128 - if server is aborting, don't bother trying to process. It will + // fail at the handler layer, but worse might result in CallQueueTooBigException if the + // queue is full but server is not properly processing requests. Better to throw an aborted + // exception here so that the client can properly react. + if (rpcServer.server != null && rpcServer.server.isAborted()) { + RegionServerAbortedException serverIsAborted = new RegionServerAbortedException( + "Server " + rpcServer.server.getServerName() + " aborting"); + this.rpcServer.metrics.exception(serverIsAborted); + sendErrorResponseForCall(id, totalRequestSize, span, serverIsAborted.getMessage(), + serverIsAborted); + return; + } + if (RpcServer.LOG.isTraceEnabled()) { RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + " totalRequestSize: " + totalRequestSize + " bytes"); @@ -559,14 +573,11 @@ protected void processRequest(ByteBuff buf) throws IOException, InterruptedExcep (totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes ) { - final ServerCall callTooBig = createCall(id, this.service, null, null, null, null, - totalRequestSize, null, 0, this.callCleanup); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); - callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, + sendErrorResponseForCall(id, totalRequestSize, span, "Call queue is full on " + this.rpcServer.server.getServerName() - + ", is hbase.ipc.server.max.callqueue.size too small?"); - TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); - callTooBig.sendResponseIfReady(); + + ", is hbase.ipc.server.max.callqueue.size too small?", + RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); return; } MethodDescriptor md = null; @@ -621,12 +632,8 @@ protected void processRequest(ByteBuff buf) throws IOException, InterruptedExcep responseThrowable = thrown; } - ServerCall readParamsFailedCall = createCall(id, this.service, null, null, null, null, - totalRequestSize, null, 0, this.callCleanup); - readParamsFailedCall.setResponse(null, null, responseThrowable, - msg + "; " + responseThrowable.getMessage()); - TraceUtil.setError(span, responseThrowable); - readParamsFailedCall.sendResponseIfReady(); + sendErrorResponseForCall(id, totalRequestSize, span, + msg + "; " + responseThrowable.getMessage(), responseThrowable); return; } @@ -656,6 +663,15 @@ protected void processRequest(ByteBuff buf) throws IOException, InterruptedExcep } } + private void sendErrorResponseForCall(int id, long totalRequestSize, Span span, String msg, + Throwable responseThrowable) throws IOException { + ServerCall 
failedcall = createCall(id, this.service, null, null, null, null, + totalRequestSize, null, 0, this.callCleanup); + failedcall.setResponse(null, null, responseThrowable, msg); + TraceUtil.setError(span, responseThrowable); + failedcall.sendResponseIfReady(); + } + protected final RpcResponse getErrorResponse(String msg, Exception e) throws IOException { ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder().setCallId(-1); ServerCall.setExceptionResponse(e, msg, headerBuilder); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java new file mode 100644 index 000000000000..8add191f9ab6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.IOException; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.ipc.CallRunner; +import org.apache.hadoop.hbase.ipc.PluggableBlockingQueue; +import org.apache.hadoop.hbase.ipc.PriorityFunction; +import org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import 
org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestRegionServerRejectDuringAbort { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionServerRejectDuringAbort.class); + + private static final Logger LOG = + LoggerFactory.getLogger(TestRegionServerRejectDuringAbort.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableName TABLE_NAME = TableName.valueOf("RSRejectOnAbort"); + + private static byte[] CF = Bytes.toBytes("cf"); + + private static final int REGIONS_NUM = 5; + + private static final AtomicReference THROWN_EXCEPTION = new AtomicReference<>(null); + + private static volatile boolean shouldThrowTooBig = false; + + @BeforeClass + public static void setUp() throws Exception { + // Will schedule a abort timeout task after SLEEP_TIME_WHEN_CLOSE_REGION ms + UTIL.getConfiguration().set("hbase.ipc.server.callqueue.type", "pluggable"); + UTIL.getConfiguration().setClass("hbase.ipc.server.callqueue.pluggable.queue.class.name", + CallQueueTooBigThrowingQueue.class, PluggableBlockingQueue.class); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numRegionServers(2).build(); + UTIL.startMiniCluster(option); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME) + .setCoprocessor(SleepWhenCloseCoprocessor.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).build()).build(); + UTIL.getAdmin().createTable(td, Bytes.toBytes("0"), Bytes.toBytes("9"), REGIONS_NUM); + } + + public static final class CallQueueTooBigThrowingQueue extends TestPluggableQueueImpl { + + public CallQueueTooBigThrowingQueue(int maxQueueLength, PriorityFunction priority, + Configuration conf) { + super(maxQueueLength, priority, conf); + } + + @Override + public boolean offer(CallRunner callRunner) { + if (shouldThrowTooBig && callRunner.getRpcCall().getRequestAttribute("test") != null) { + return false; + } + return super.offer(callRunner); + } + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + /** + * Tests that the logic in ServerRpcConnection works such that if the server is aborted, it short + * circuits any other logic. This means we no longer even attempt to enqueue the request onto the + * call queue. We verify this by using a special call queue which we can trigger to always return + * CallQueueTooBigException. If the logic works, despite forcing those exceptions, we should not + * see them. + */ + @Test + public void testRejectRequestsOnAbort() throws Exception { + // We don't want to disrupt the server carrying meta, because we plan to disrupt requests to + // the server. Disrupting meta requests messes with the test. 
+ HRegionServer serverWithoutMeta = null; + for (JVMClusterUtil.RegionServerThread regionServerThread : UTIL.getMiniHBaseCluster() + .getRegionServerThreads()) { + HRegionServer regionServer = regionServerThread.getRegionServer(); + if ( + regionServer.getRegions(TableName.META_TABLE_NAME).isEmpty() + && !regionServer.getRegions(TABLE_NAME).isEmpty() + ) { + serverWithoutMeta = regionServer; + break; + } + } + + assertNotNull("couldn't find a server without meta, but with test table regions", + serverWithoutMeta); + + Thread writer = new Thread(getWriterThreadRunnable(serverWithoutMeta.getServerName())); + writer.setDaemon(true); + writer.start(); + + // Trigger the abort. Our WriterThread will detect the first RegionServerAbortedException + // and trigger our custom queue to reject any more requests. This would typically result in + // CallQueueTooBigException, unless our logic in ServerRpcConnection to preempt the processing + // of a request is working. + serverWithoutMeta.abort("Abort RS for test"); + + UTIL.waitFor(60_000, () -> THROWN_EXCEPTION.get() != null); + assertEquals(THROWN_EXCEPTION.get().getCause().getClass(), RegionServerAbortedException.class); + } + + private Runnable getWriterThreadRunnable(ServerName loadServer) { + return () -> { + try { + Configuration conf = UTIL.getConfiguration(); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); + try (Connection conn = ConnectionFactory.createConnection(conf); + Table table = conn.getTableBuilder(TABLE_NAME, null) + .setRequestAttribute("test", new byte[] { 0 }).build()) { + // find the first region to exist on our test server, then submit requests to it + for (HRegionLocation regionLocation : table.getRegionLocator().getAllRegionLocations()) { + if (regionLocation.getServerName().equals(loadServer)) { + submitRequestsToRegion(table, regionLocation.getRegion()); + return; + } + } + throw new RuntimeException("Failed to find any regions for loadServer " + loadServer); + } + } catch (Exception e) { + LOG.warn("Failed to load data", e); + synchronized (THROWN_EXCEPTION) { + THROWN_EXCEPTION.set(e); + THROWN_EXCEPTION.notifyAll(); + } + } + }; + } + + private void submitRequestsToRegion(Table table, RegionInfo regionInfo) throws IOException { + // We will block closes of the regions with a CP, so no need to worry about the region getting + // reassigned. Just use the same rowkey always. + byte[] rowKey = getRowKeyWithin(regionInfo); + + int i = 0; + while (true) { + try { + i++; + table.put(new Put(rowKey).addColumn(CF, Bytes.toBytes(i), Bytes.toBytes(i))); + } catch (IOException e) { + // only catch RegionServerAbortedException once. 
After that, the next exception thrown + // is our test case + if ( + !shouldThrowTooBig && e instanceof RetriesExhaustedException + && e.getCause() instanceof RegionServerAbortedException + ) { + shouldThrowTooBig = true; + } else { + throw e; + } + } + + // small sleep to relieve pressure + Threads.sleep(10); + } + } + + private byte[] getRowKeyWithin(RegionInfo regionInfo) { + byte[] rowKey; + // region is start of table, find one after start key + if (regionInfo.getStartKey().length == 0) { + if (regionInfo.getEndKey().length == 0) { + // doesn't matter, single region table + return Bytes.toBytes(1); + } else { + // find a row just before endkey + rowKey = Bytes.copy(regionInfo.getEndKey()); + rowKey[rowKey.length - 1]--; + return rowKey; + } + } else { + return regionInfo.getStartKey(); + } + } + + public static class SleepWhenCloseCoprocessor implements RegionCoprocessor, RegionObserver { + + public SleepWhenCloseCoprocessor() { + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preClose(ObserverContext c, boolean abortRequested) + throws IOException { + // Wait so that the region can't close until we get the information we need from our test + UTIL.waitFor(60_000, () -> THROWN_EXCEPTION.get() != null); + } + } +} From 7ec0176b2fe376eb3c4f27d0de44671208a106e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:11:26 +0800 Subject: [PATCH 098/514] HBASE-28134 Bump urllib3 in /dev-support/git-jira-release-audit (#5448) Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.5 to 1.26.17. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.5...1.26.17) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index e1a2497593e3..6bc2c23625a2 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -35,5 +35,5 @@ requests-oauthlib==1.3.0 requests-toolbelt==0.9.1 six==1.14.0 smmap2==2.0.5 -urllib3==1.26.5 +urllib3==1.26.17 wcwidth==0.1.8 From eb24001248304e53c6383f309fbfc00a8437b3a1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 6 Oct 2023 21:45:21 +0800 Subject: [PATCH 099/514] HBASE-22138 Undo our direct dependence on protos in google.protobuf.Any in Procedure.proto (#5411) Bump the hbase thirdparty dependency to 4.1.5 where we relocated the proto files in protobuf-java jar The generated protobuf messages are OK since we do not need to change any non generated java code Signed-off-by: Nick Dimiduk Signed-off-by: Nihal Jain --- hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto index 8336b5666dc8..addc96cd34c4 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto @@ -24,7 +24,7 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; option optimize_for = SPEED; -import "google/protobuf/any.proto"; +import "org/apache/hbase/thirdparty/google/protobuf/any.proto"; import "server/ErrorHandling.proto"; enum ProcedureState { diff --git a/pom.xml b/pom.xml index 56058b8e2b97..3badb8f2d39a 100644 --- a/pom.xml +++ b/pom.xml @@ -898,7 +898,7 @@ 1.1.10.4 1.9 1.5.5-2 - 4.1.4 + 4.1.5 0.8.8 From d111747a161b28b294baf4b7e033794d0ca66195 Mon Sep 17 00:00:00 2001 From: Jan Van Besien NGDATA <93314377+janvanbesien-ngdata@users.noreply.github.com> Date: Sat, 7 Oct 2023 03:21:40 +0200 Subject: [PATCH 100/514] HBASE-28082 oldWALs naming can be incompatible with HBase backup (#5445) Make the hostname parsing in BackupUtils#parseHostFromOldLog more lenient by not making any assumptions about the name of the file other than that it starts with a org.apache.hadoop.hbase.ServerName. 
Signed-off-by: Duo Zhang (cherry picked from commit 9262cbc1664edd92d431c27ca5b43c4bac7473c1) --- .../hadoop/hbase/backup/util/BackupUtils.java | 10 +++--- .../hadoop/hbase/backup/TestBackupUtils.java | 33 ++++++++++++------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index d4e849f610ae..d0a04960779d 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; /** @@ -366,10 +367,11 @@ public static String parseHostFromOldLog(Path p) { return null; } try { - String n = p.getName(); - int idx = n.lastIndexOf(LOGNAME_SEPARATOR); - String s = URLDecoder.decode(n.substring(0, idx), "UTF8"); - return ServerName.valueOf(s).getAddress().toString(); + String urlDecodedName = URLDecoder.decode(p.getName(), "UTF8"); + Iterable nameSplitsOnComma = Splitter.on(",").split(urlDecodedName); + String host = Iterables.get(nameSplitsOnComma, 0); + String port = Iterables.get(nameSplitsOnComma, 1); + return host + ":" + port; } catch (Exception e) { LOG.warn("Skip log file (can't parse): {}", p); return null; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java index a55720777c4c..3fc2c31a9d51 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java @@ -87,21 +87,32 @@ public Path run() { @Test public void testFilesystemWalHostNameParsing() throws IOException { - String host = "localhost"; - int port = 60030; - ServerName serverName = ServerName.valueOf(host, port, 1234); + String[] hosts = + new String[] { "10.20.30.40", "127.0.0.1", "localhost", "a-region-server.domain.com" }; + Path walRootDir = CommonFSUtils.getWALRootDir(conf); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); - Path testWalPath = new Path(oldLogDir, - serverName.toString() + BackupUtils.LOGNAME_SEPARATOR + EnvironmentEdgeManager.currentTime()); - Path testMasterWalPath = - new Path(oldLogDir, testWalPath.getName() + MasterRegionFactory.ARCHIVED_WAL_SUFFIX); + int port = 60030; + for (String host : hosts) { + ServerName serverName = ServerName.valueOf(host, port, 1234); + + Path testOldWalPath = new Path(oldLogDir, + serverName + BackupUtils.LOGNAME_SEPARATOR + EnvironmentEdgeManager.currentTime()); + Assert.assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, + BackupUtils.parseHostFromOldLog(testOldWalPath)); + + Path testMasterWalPath = + new Path(oldLogDir, testOldWalPath.getName() + MasterRegionFactory.ARCHIVED_WAL_SUFFIX); + Assert.assertNull(BackupUtils.parseHostFromOldLog(testMasterWalPath)); - String parsedHost = BackupUtils.parseHostFromOldLog(testMasterWalPath); - Assert.assertNull(parsedHost); + // org.apache.hadoop.hbase.wal.BoundedGroupingStrategy does this + Path testOldWalWithRegionGroupingPath = new Path(oldLogDir, + serverName + BackupUtils.LOGNAME_SEPARATOR + serverName + 
BackupUtils.LOGNAME_SEPARATOR + + "regiongroup-0" + BackupUtils.LOGNAME_SEPARATOR + EnvironmentEdgeManager.currentTime()); + Assert.assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, + BackupUtils.parseHostFromOldLog(testOldWalWithRegionGroupingPath)); + } - parsedHost = BackupUtils.parseHostFromOldLog(testWalPath); - Assert.assertEquals(parsedHost, host + Addressing.HOSTNAME_PORT_SEPARATOR + port); } } From 544d3683a7d33c1f289113d4d7d7df121eb58084 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 7 Oct 2023 11:31:23 +0800 Subject: [PATCH 101/514] HBASE-28136 HRegionServer should implement isStopping method (#5452) Signed-off-by: GeorryHuang Signed-off-by: Xiaolin Ha --- .../org/apache/hadoop/hbase/regionserver/HRegionServer.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 110a9f7fe562..85721a354977 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -781,6 +781,11 @@ private void initializeReplicationMarkerChore() { } } + @Override + public boolean isStopping() { + return stopping; + } + /** * The HRegionServer sticks in this loop until closed. */ From 6455c49239a4eeb966a4f4d9afbffc9610e6d394 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 7 Oct 2023 11:35:37 +0800 Subject: [PATCH 102/514] HBASE-28129 Do not retry refreshSources when region server is already stopping (#5453) Signed-off-by: GeorryHuang Signed-off-by: Xiaolin Ha --- .../regionserver/ReplicationSource.java | 50 ++++++++++--------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 00be66c5c0fd..4c864e5e4502 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -370,11 +370,9 @@ private void tryStartNewShipper(String walGroupId) { ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, getStartOffset(walGroupId)); ReplicationSourceShipper worker = createNewShipper(walGroupId, walReader); - Threads.setDaemonThreadRunning( - walReader, Thread.currentThread().getName() + ".replicationSource.wal-reader." - + walGroupId + "," + queueId, - (t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); - worker.startup((t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + Threads.setDaemonThreadRunning(walReader, Thread.currentThread().getName() + + ".replicationSource.wal-reader." 
+ walGroupId + "," + queueId, this::retryRefreshing); + worker.startup(this::retryRefreshing); return worker; } }); @@ -448,24 +446,30 @@ WALEntryFilter getWalEntryFilter() { return walEntryFilter; } - private void uncaughtException(Thread t, Throwable e, ReplicationSourceManager manager, - String peerId) { - OOMEChecker.exitIfOOME(e, getClass().getSimpleName()); - LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), e); + // log the error, check if the error is OOME, or whether we should abort the server + private void checkError(Thread t, Throwable error) { + OOMEChecker.exitIfOOME(error, getClass().getSimpleName()); + LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), error); if (abortOnError) { - server.abort("Unexpected exception in " + t.getName(), e); + server.abort("Unexpected exception in " + t.getName(), error); } - if (manager != null) { - while (true) { - try { - LOG.info("Refreshing replication sources now due to previous error on thread: {}", - t.getName()); - manager.refreshSources(peerId); - break; - } catch (IOException | ReplicationException e1) { - LOG.error("Replication sources refresh failed.", e1); - sleepForRetries("Sleeping before try refreshing sources again", maxRetriesMultiplier); - } + } + + private void retryRefreshing(Thread t, Throwable error) { + checkError(t, error); + while (true) { + if (server.isAborted() || server.isStopped() || server.isStopping()) { + LOG.warn("Server is shutting down, give up refreshing source for peer {}", getPeerId()); + return; + } + try { + LOG.info("Refreshing replication sources now due to previous error on thread: {}", + t.getName()); + manager.refreshSources(getPeerId()); + break; + } catch (Exception e) { + LOG.error("Replication sources refresh failed.", e); + sleepForRetries("Sleeping before try refreshing sources again", maxRetriesMultiplier); } } } @@ -630,7 +634,7 @@ public ReplicationSourceInterface startup() { // keep looping in this thread until initialize eventually succeeds, // while the server main startup one can go on with its work. sourceRunning = false; - uncaughtException(t, e, null, null); + checkError(t, e); retryStartup.set(!this.abortOnError); do { if (retryStartup.get()) { @@ -641,7 +645,7 @@ public ReplicationSourceInterface startup() { initialize(); } catch (Throwable error) { setSourceStartupStatus(false); - uncaughtException(t, error, null, null); + checkError(t, error); retryStartup.set(!this.abortOnError); } } From 865a59547b3058100c834a28b29ea3e10839f8e3 Mon Sep 17 00:00:00 2001 From: Ke Han <38852697+hanke580@users.noreply.github.com> Date: Sat, 7 Oct 2023 02:50:24 -0400 Subject: [PATCH 103/514] HBASE-28109 NPE for the region state: Failed to become active master (HMaster) (#5432) The RegionState for meta region could be null for a fresh new cluster, we should also wait a bit to let it finish the initialization. 
Signed-off-by: Duo Zhang --- .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 995bff17724e..b0862a090760 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1412,7 +1412,7 @@ private boolean isRegionOnline(RegionInfo ri) { RetryCounter rc = null; while (!isStopped()) { RegionState rs = this.assignmentManager.getRegionStates().getRegionState(ri); - if (rs.isOpened()) { + if (rs != null && rs.isOpened()) { if (this.getServerManager().isServerOnline(rs.getServerName())) { return true; } From f15ff5b167d5ade7a5f02fa35d41a78218ee8aaf Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 9 Oct 2023 21:19:38 +0800 Subject: [PATCH 104/514] HBASE-28127 Upgrade avro version to 1.11.3 (#5454) Signed-off-by: GeorryHuang --- .../src/main/resources/supplemental-models.xml | 18 ++++++++++++++++++ pom.xml | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml index b7204d71acc8..5dbdd7b42556 100644 --- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml +++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml @@ -2429,4 +2429,22 @@ Copyright (c) 2007-2017 The JRuby project + + + + org.apache.avro + avro + + The Apache Software Foundation + http://www.apache.org/ + + + + Apache License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + diff --git a/pom.xml b/pom.xml index 3badb8f2d39a..187a877eb295 100644 --- a/pom.xml +++ b/pom.xml @@ -818,7 +818,7 @@ When building with jdk11, we will use 0.14.1, please see the build-with-jdk11 profile. 
--> 0.13.0 - 1.11.0 + 1.11.3 2.8.1 1.15 1.7 From 921a5706a133ef086bd5c319c26cc82c7497e5c8 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 10 Oct 2023 20:51:04 -0800 Subject: [PATCH 105/514] HBASE-28144 Canary publish read failure fails with NPE if region location is null (#5456) Signed-off-by: Wellington Chevreuil Signed-off-by: David Manning --- .../apache/hadoop/hbase/tool/CanaryTool.java | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 12cb90c08e66..d5676263c820 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -323,13 +323,15 @@ public void resetFailuresCountDetails() { } private void incFailuresCountDetails(ServerName serverName, RegionInfo region) { - perServerFailuresCount.compute(serverName, (server, count) -> { - if (count == null) { - count = new LongAdder(); - } - count.increment(); - return count; - }); + if (serverName != null) { + perServerFailuresCount.compute(serverName, (server, count) -> { + if (count == null) { + count = new LongAdder(); + } + count.increment(); + return count; + }); + } perTableFailuresCount.compute(region.getTable().getNameAsString(), (tableName, count) -> { if (count == null) { count = new LongAdder(); @@ -340,18 +342,18 @@ private void incFailuresCountDetails(ServerName serverName, RegionInfo region) { } public void publishReadFailure(ServerName serverName, RegionInfo region, Exception e) { - incReadFailureCount(); - incFailuresCountDetails(serverName, region); LOG.error("Read from {} on serverName={} failed", region.getRegionNameAsString(), serverName, e); + incReadFailureCount(); + incFailuresCountDetails(serverName, region); } public void publishReadFailure(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, Exception e) { - incReadFailureCount(); - incFailuresCountDetails(serverName, region); LOG.error("Read from {} on serverName={}, columnFamily={} failed", region.getRegionNameAsString(), serverName, column.getNameAsString(), e); + incReadFailureCount(); + incFailuresCountDetails(serverName, region); } public void publishReadTiming(ServerName serverName, RegionInfo region, @@ -368,17 +370,17 @@ public void publishReadTiming(ServerName serverName, RegionInfo region, } public void publishWriteFailure(ServerName serverName, RegionInfo region, Exception e) { + LOG.error("Write to {} on {} failed", region.getRegionNameAsString(), serverName, e); incWriteFailureCount(); incFailuresCountDetails(serverName, region); - LOG.error("Write to {} on {} failed", region.getRegionNameAsString(), serverName, e); } public void publishWriteFailure(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, Exception e) { - incWriteFailureCount(); - incFailuresCountDetails(serverName, region); LOG.error("Write to {} on {} {} failed", region.getRegionNameAsString(), serverName, column.getNameAsString(), e); + incWriteFailureCount(); + incFailuresCountDetails(serverName, region); } public void publishWriteTiming(ServerName serverName, RegionInfo region, From 0d15d03d69a672e27fa5cbfac5a0918273703678 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 11 Oct 2023 15:21:18 +0200 Subject: [PATCH 106/514] HBASE-28133 TestSyncTimeRangeTracker fails with OOM with small -Xms values (#5450) reduce test dataset size by 20% to fit heap 
with safety margin Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- .../hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java index 9dba0d1c1bbc..99b29f8aeb71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java @@ -34,7 +34,7 @@ public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSyncTimeRangeTracker.class); - private static final int NUM_KEYS = 10000000; + private static final int NUM_KEYS = 8000000; private static final int NUM_OF_THREADS = 20; @Override From 991d4aab518d77757e6674e630a0f5d53a712dda Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:54:01 +0800 Subject: [PATCH 107/514] HBASE-28147 Bump gitpython from 3.1.35 to 3.1.37 in /dev-support/flaky-tests (#5458) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.35 to 3.1.37. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.35...3.1.37) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 106ead4aa4aa..9ca8c66afb1a 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -17,6 +17,6 @@ # requests==2.31.0 future==0.18.3 -gitpython==3.1.35 +gitpython==3.1.37 rbtools==4.0 jinja2==3.1.2 From 8f555815a553c4f5ff3ea698629419d5c9af259f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Oct 2023 15:04:27 +0800 Subject: [PATCH 108/514] HBASE-28148 Bump gitpython in /dev-support/git-jira-release-audit (#5459) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.35 to 3.1.37. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.35...3.1.37) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 6bc2c23625a2..1f01ac81a82e 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -23,7 +23,7 @@ cryptography==41.0.4 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 -GitPython==3.1.35 +GitPython==3.1.37 idna==2.8 jira==2.0.0 oauthlib==3.1.0 From 2568222380ee4e08cc043b1b0dfb6bd8005107d9 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 12 Oct 2023 09:16:09 +0200 Subject: [PATCH 109/514] HBASE-28135 Specify -Xms for tests (#5451) Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- pom.xml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 187a877eb295..7d0bb49dcdab 100644 --- a/pom.xml +++ b/pom.xml @@ -941,7 +941,10 @@ This value is managed separately for jdk11. See below. --> 2200m + 1000m + 2200m + 1000m -enableassertions -Dhbase.build.id=${build.id} -Xmx${surefire.Xmx} - -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true + -Xms${surefire.Xms} -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Djdk.net.URLClassPath.disableClassPathURLCheck=true -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.netty.eventLoopThreads=3 -Dio.opentelemetry.context.enableStrictContext=true -enableassertions -Xmx${surefire.cygwinXmx} - -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true + -Xms${surefire.cygwinXms} -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true "-Djava.library.path=${hadoop.library.path};${java.library.path}" -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.opentelemetry.context.enableStrictContext=true From 8a9ad0736621fa1b00b5ae90529ca6065f88c67f Mon Sep 17 00:00:00 2001 From: qiuwei68 <81564139+qiuwei68@users.noreply.github.com> Date: Thu, 12 Oct 2023 18:40:30 +0800 Subject: [PATCH 110/514] HBASE-28138 Make the connection idle timeout configurable for the embedded HTTP servers (#5457) Signed-off-by: Wellington Chevreuil --- .../java/org/apache/hadoop/hbase/http/HttpServer.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index 9f4ce64a8803..a2b8a8fb6275 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -115,6 +115,11 @@ public class HttpServer implements FilterContainer { // limitation otherwise the UTs will fail private static final int DEFAULT_MAX_HEADER_SIZE = Character.MAX_VALUE - 1; + // Add configuration for jetty idle timeout + private static final String HTTP_JETTY_IDLE_TIMEOUT = "hbase.ui.connection.idleTimeout"; + // Default jetty idle timeout + private static final long DEFAULT_HTTP_JETTY_IDLE_TIMEOUT = 30000; + static final String FILTER_INITIALIZERS_PROPERTY = "hbase.http.filter.initializers"; static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; @@ -467,6 +472,9 @@ public HttpServer build() throws IOException { // default settings for connector 
listener.setAcceptQueueSize(128); + // config idle timeout for jetty + listener + .setIdleTimeout(conf.getLong(HTTP_JETTY_IDLE_TIMEOUT, DEFAULT_HTTP_JETTY_IDLE_TIMEOUT)); if (Shell.WINDOWS) { // result of setting the SO_REUSEADDR flag is different on Windows // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx From 391dfda6adcbe42b5dcb68a4bb98f1fce49ae88c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 13 Oct 2023 22:19:18 +0800 Subject: [PATCH 111/514] HBASE-28140 AbstractWALProvider may miss the WAL which is under creation in getWALs method (#5455) Signed-off-by: GeorryHuang Signed-off-by: Xiaolin Ha Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/wal/AbstractWALProvider.java | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java index e9c63fb52170..31ef3cebc2d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALProvider.java @@ -24,7 +24,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiPredicate; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -87,6 +89,15 @@ public abstract class AbstractWALProvider implements WALProvider, PeerActionList private final KeyLocker createLock = new KeyLocker<>(); + // in getWALs we can not throw any exceptions out, so we use lock and condition here as it + // supports awaitUninterruptibly which will not throw a InterruptedException + private final Lock numRemoteWALUnderCreationLock = new ReentrantLock(); + private final Condition noRemoteWALUnderCreationCond = + numRemoteWALUnderCreationLock.newCondition(); + // record the number of remote WALs which are under creation. This is very important to not + // missing a WAL instance in getWALs method. See HBASE-28140 and related issues for more details. + private int numRemoteWALUnderCreation; + // we need to have this because when getting meta wal, there is no peer info provider yet. 
private SyncReplicationPeerInfoProvider peerInfoProvider = new SyncReplicationPeerInfoProvider() { @@ -150,11 +161,26 @@ private WAL getRemoteWAL(RegionInfo region, String peerId, String remoteWALDir) WAL wal = createRemoteWAL(region, ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir), ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId), getRemoteWALPrefix(peerId), ReplicationUtils.SYNC_WAL_SUFFIX); + numRemoteWALUnderCreationLock.lock(); + try { + numRemoteWALUnderCreation++; + } finally { + numRemoteWALUnderCreationLock.unlock(); + } initWAL(wal); peerId2WAL.put(peerId, Optional.of(wal)); return wal; } finally { lock.unlock(); + numRemoteWALUnderCreationLock.lock(); + try { + numRemoteWALUnderCreation--; + if (numRemoteWALUnderCreation == 0) { + noRemoteWALUnderCreationCond.signalAll(); + } + } finally { + numRemoteWALUnderCreationLock.unlock(); + } } } @@ -179,6 +205,17 @@ public final WAL getWAL(RegionInfo region) throws IOException { @Override public final List getWALs() { + List wals = new ArrayList(); + numRemoteWALUnderCreationLock.lock(); + try { + while (numRemoteWALUnderCreation > 0) { + noRemoteWALUnderCreationCond.awaitUninterruptibly(); + } + peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get) + .forEach(wals::add); + } finally { + numRemoteWALUnderCreationLock.unlock(); + } return Streams .concat(peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get), getWALs0().stream()) From d3c03425214be3bf11cc501b567b18fd99641efd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Oct 2023 23:20:12 +0800 Subject: [PATCH 112/514] HBASE-28169 Bump urllib3 in /dev-support/git-jira-release-audit (#5465) Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.17 to 1.26.18. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.17...1.26.18) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 1f01ac81a82e..c243f731e1d6 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -35,5 +35,5 @@ requests-oauthlib==1.3.0 requests-toolbelt==0.9.1 six==1.14.0 smmap2==2.0.5 -urllib3==1.26.17 +urllib3==1.26.18 wcwidth==0.1.8 From e07d1fe0059d5dc17d1aad7c582e486c0f75fe52 Mon Sep 17 00:00:00 2001 From: hiping-tech <58875741+hiping-tech@users.noreply.github.com> Date: Thu, 19 Oct 2023 23:22:10 +0800 Subject: [PATCH 113/514] HBASE-28113 Modify the way of acquiring the RegionStateNode lock in checkOnlineRegionsReport to tryLock (#5442) * To prevent threads from being blocked by the lock of RegionStateNode, modify the way of acquiring the RegionStateNode lock in checkOnlineRegionsReport to tryLock. 
Co-authored-by: lvhaiping.lhp Signed-off-by: Duo Zhang --- .../master/assignment/AssignmentManager.java | 52 +++++++++++-------- .../master/assignment/RegionStateNode.java | 4 ++ 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index f2cfad87997c..789a6e2ca89d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -1398,29 +1398,39 @@ private void checkOnlineRegionsReport(ServerStateNode serverNode, Set re continue; } final long lag = 1000; - regionNode.lock(); - try { - long diff = EnvironmentEdgeManager.currentTime() - regionNode.getLastUpdate(); - if (regionNode.isInState(State.OPENING, State.OPEN)) { - // This is possible as a region server has just closed a region but the region server - // report is generated before the closing, but arrive after the closing. Make sure there - // is some elapsed time so less false alarms. - if (!regionNode.getRegionLocation().equals(serverName) && diff > lag) { - LOG.warn("Reporting {} server does not match {} (time since last " - + "update={}ms); closing...", serverName, regionNode, diff); - closeRegionSilently(serverNode.getServerName(), regionName); - } - } else if (!regionNode.isInState(State.CLOSING, State.SPLITTING)) { - // So, we can get report that a region is CLOSED or SPLIT because a heartbeat - // came in at about same time as a region transition. Make sure there is some - // elapsed time so less false alarms. - if (diff > lag) { - LOG.warn("Reporting {} state does not match {} (time since last update={}ms)", - serverName, regionNode, diff); + // This is just a fallback check designed to identify unexpected data inconsistencies, so we + // use tryLock to attempt to acquire the lock, and if the lock cannot be acquired, we skip the + // check. This will not cause any additional problems and also prevents the regionServerReport + // call from being stuck for too long which may cause deadlock on region assignment. + if (regionNode.tryLock()) { + try { + long diff = EnvironmentEdgeManager.currentTime() - regionNode.getLastUpdate(); + if (regionNode.isInState(State.OPENING, State.OPEN)) { + // This is possible as a region server has just closed a region but the region server + // report is generated before the closing, but arrive after the closing. Make sure + // there + // is some elapsed time so less false alarms. + if (!regionNode.getRegionLocation().equals(serverName) && diff > lag) { + LOG.warn("Reporting {} server does not match {} (time since last " + + "update={}ms); closing...", serverName, regionNode, diff); + closeRegionSilently(serverNode.getServerName(), regionName); + } + } else if (!regionNode.isInState(State.CLOSING, State.SPLITTING)) { + // So, we can get report that a region is CLOSED or SPLIT because a heartbeat + // came in at about same time as a region transition. Make sure there is some + // elapsed time so less false alarms. + if (diff > lag) { + LOG.warn("Reporting {} state does not match {} (time since last update={}ms)", + serverName, regionNode, diff); + } } + } finally { + regionNode.unlock(); } - } finally { - regionNode.unlock(); + } else { + LOG.warn( + "Unable to acquire lock for regionNode {}. It is likely that another thread is currently holding the lock. 
To avoid deadlock, skip execution for now.", + regionNode); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java index 3856ce227ba1..91c0222facd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java @@ -323,6 +323,10 @@ public void lock() { lock.lock(); } + public boolean tryLock() { + return lock.tryLock(); + } + public void unlock() { lock.unlock(); } From dde504ce489fd3fd55166a872768a077400ba2ab Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 20 Oct 2023 11:58:28 +0800 Subject: [PATCH 114/514] HBASE-28155 RecoveredReplicationSource quit when there are still unfinished groups (#5466) Signed-off-by: Guanghao Zhang --- .../RecoveredReplicationSource.java | 16 ++++++++++ .../regionserver/ReplicationSource.java | 29 ++++++++++++++----- 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index e9062472221c..e47df36e3aa2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -26,6 +26,22 @@ @InterfaceAudience.Private public class RecoveredReplicationSource extends ReplicationSource { + @Override + protected void startShippers() { + for (String walGroupId : logQueue.getQueues().keySet()) { + workerThreads.put(walGroupId, createNewShipper(walGroupId)); + } + // start shippers after initializing the workerThreads, as in the below postFinish logic, if + // workerThreads is empty, we will mark the RecoveredReplicationSource as finished. So if we + // start the worker on the fly, it is possible that a shipper has already finished its work and + // called postFinish, and find out the workerThreads is empty and then mark the + // RecoveredReplicationSource as finish, while the next shipper has not been added to + // workerThreads yet. See HBASE-28155 for more details. + for (ReplicationSourceShipper shipper : workerThreads.values()) { + startShipper(shipper); + } + } + @Override protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, ReplicationSourceWALReader walReader) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 4c864e5e4502..094fa4aaa786 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -360,6 +360,19 @@ private long getStartOffset(String walGroupId) { } } + protected final ReplicationSourceShipper createNewShipper(String walGroupId) { + ReplicationSourceWALReader walReader = + createNewWALReader(walGroupId, getStartOffset(walGroupId)); + ReplicationSourceShipper worker = createNewShipper(walGroupId, walReader); + Threads.setDaemonThreadRunning(walReader, Thread.currentThread().getName() + + ".replicationSource.wal-reader." 
+ walGroupId + "," + queueId, this::retryRefreshing); + return worker; + } + + protected final void startShipper(ReplicationSourceShipper worker) { + worker.startup(this::retryRefreshing); + } + private void tryStartNewShipper(String walGroupId) { workerThreads.compute(walGroupId, (key, value) -> { if (value != null) { @@ -367,12 +380,8 @@ private void tryStartNewShipper(String walGroupId) { return value; } else { LOG.debug("{} starting shipping worker for walGroupId={}", logPeerId(), walGroupId); - ReplicationSourceWALReader walReader = - createNewWALReader(walGroupId, getStartOffset(walGroupId)); - ReplicationSourceShipper worker = createNewShipper(walGroupId, walReader); - Threads.setDaemonThreadRunning(walReader, Thread.currentThread().getName() - + ".replicationSource.wal-reader." + walGroupId + "," + queueId, this::retryRefreshing); - worker.startup(this::retryRefreshing); + ReplicationSourceShipper worker = createNewShipper(walGroupId); + startShipper(worker); return worker; } }); @@ -522,7 +531,7 @@ private long getCurrentBandwidth() { * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ - protected boolean sleepForRetries(String msg, int sleepMultiplier) { + private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, @@ -605,10 +614,14 @@ private void initialize() { queueId, logQueue.getNumQueues(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); // Start workers + startShippers(); + setSourceStartupStatus(false); + } + + protected void startShippers() { for (String walGroupId : logQueue.getQueues().keySet()) { tryStartNewShipper(walGroupId); } - setSourceStartupStatus(false); } private synchronized void setSourceStartupStatus(boolean initializing) { From 4429de48bace58f7581a3ad568c19531a1697071 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 20 Oct 2023 22:22:16 +0800 Subject: [PATCH 115/514] HBASE-28114 Add more comments to explain why replication log queue could never be empty for normal replication queue (#5443) Also add a retry logic to make the code more robust Signed-off-by: Xiaolin Ha --- .../regionserver/WALEntryStream.java | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index c6268674c5b8..d1f85774a635 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -334,11 +334,35 @@ private HasNext tryAdvanceEntry() { boolean beingWritten = pair.getSecond(); LOG.trace("Reading WAL {}; result={}, currently open for write={}", this.currentPath, state, beingWritten); + // The below implementation needs to make sure that when beingWritten == true, we should not + // dequeue the current WAL file in logQueue. 
switch (state) { case NORMAL: // everything is fine, just return return HasNext.YES; case EOF_WITH_TRAILER: + // in readNextEntryAndRecordReaderPosition, we will acquire rollWriteLock, and we can only + // schedule a close writer task, in which we will write trailer, under the rollWriteLock, so + // typically if beingWritten == true, we should not reach here, as we need to reopen the + // reader after writing the trailer. The only possible way to reach here while beingWritten + // == true is due to the inflightWALClosures logic in AbstractFSWAL, as if the writer is + // still in this map, we will consider it as beingWritten, but actually, here we could make + // sure that the new WAL file has already been enqueued into the logQueue, so here dequeuing + // the current log file is safe. + if (beingWritten && logQueue.getQueue(walGroupId).size() <= 1) { + // As explained above, if we implement everything correctly, we should not arrive here. + // But anyway, even if we reach here due to some code changes in the future, reading + // the file again can make sure that we will not accidentally consider the queue as + // finished, and since there is a trailer, we will soon consider the file as finished + // and move on. + LOG.warn( + "We have reached the trailer while reading the file '{}' which is currently" + + " beingWritten, but it is the last file in log queue {}. This should not happen" + + " typically, try to read again so we will not miss anything", + currentPath, walGroupId); + return HasNext.RETRY; + } + assert !beingWritten || logQueue.getQueue(walGroupId).size() > 1; // we have reached the trailer, which means this WAL file has been closed cleanly and we // have finished reading it successfully, just move to the next WAL file and let the upper // layer start reading the next WAL file @@ -436,6 +460,16 @@ private void dequeueCurrentLog() { * Returns whether the file is opened for writing. */ private Pair readNextEntryAndRecordReaderPosition() { + // we must call this before actually reading from the reader, as this method will acquire the + // rollWriteLock. This is very important, as we will enqueue the new WAL file in postLogRoll, + // and before this happens, we could have already finished closing the previous WAL file. If we + // do not acquire the rollWriteLock and return whether the current file is being written to, we + // may finish reading the previous WAL file and start to read the next one, before it is + // enqueued into the logQueue, thus lead to an empty logQueue and make the shipper think the + // queue is already ended and quit. See HBASE-28114 and related issues for more details. 
+ // in the future, if we want to optimize the logic here, for example, do not call this method + // every time, or do not acquire rollWriteLock in the implementation of this method, we need to + // carefully review the optimized implementation OptionalLong fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); WALTailingReader.Result readResult = reader.next(fileLength.orElse(-1)); long readerPos = readResult.getEntryEndPos(); From 4b70e59697be3695c6c4209365d65499b432f45b Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Fri, 20 Oct 2023 19:15:01 -0700 Subject: [PATCH 116/514] HBASE-28172 Update downloads.xml for release 2.5.6 Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 6cb2246f0137..72643bc8bf86 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -70,26 +70,26 @@ under the License. - 2.5.5 + 2.5.6 - 2023/06/13 + 2023/10/20 - 2.5.5 vs 2.5.4 + 2.5.6 vs 2.5.5 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc)
- hadoop3-bin (sha512 asc)
- hadoop3-client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc)
+ hadoop3-bin (sha512 asc)
+ hadoop3-client-bin (sha512 asc) stable release From 91ac8abe5d408f2ac33745ebad2d4065e496d97b Mon Sep 17 00:00:00 2001 From: VAIBHAV SUBHASH JOSHI Date: Mon, 23 Oct 2023 15:52:19 +0530 Subject: [PATCH 117/514] HBASE-28064:Implement truncate_region command (#5462) Signed-off-by: Wellington Chevreuil Signed-off-by: Nihal Jain --- .../org/apache/hadoop/hbase/client/Admin.java | 14 ++ .../hbase/client/AdminOverAsyncAdmin.java | 10 + .../hadoop/hbase/client/AsyncAdmin.java | 6 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 + .../hbase/client/RawAsyncHBaseAdmin.java | 66 ++++++ .../shaded/protobuf/RequestConverter.java | 11 + .../main/protobuf/server/master/Master.proto | 16 ++ .../server/master/MasterProcedure.proto | 8 + .../hbase/coprocessor/MasterObserver.java | 40 ++++ .../apache/hadoop/hbase/master/HMaster.java | 31 +++ .../hbase/master/MasterCoprocessorHost.java | 54 +++++ .../hbase/master/MasterRpcServices.java | 12 + .../hadoop/hbase/master/MasterServices.java | 9 + .../master/assignment/AssignmentManager.java | 6 + .../AbstractStateMachineRegionProcedure.java | 6 + .../procedure/TableProcedureInterface.java | 3 +- .../hbase/master/procedure/TableQueue.java | 1 + .../procedure/TruncateRegionProcedure.java | 219 ++++++++++++++++++ .../client/TestAsyncRegionAdminApi2.java | 84 +++++++ .../hbase/master/MockNoopMasterServices.java | 6 + .../TestTruncateRegionProcedure.java | 202 ++++++++++++++++ .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 10 + hbase-shell/src/main/ruby/hbase/admin.rb | 10 + hbase-shell/src/main/ruby/shell.rb | 1 + .../ruby/shell/commands/truncate_region.rb | 36 +++ hbase-shell/src/test/ruby/hbase/admin_test.rb | 11 + .../hbase/thrift2/client/ThriftAdmin.java | 10 + 27 files changed, 886 insertions(+), 1 deletion(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateRegionProcedure.java create mode 100644 hbase-shell/src/main/ruby/shell/commands/truncate_region.rb diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 4d579c16af26..417e0013523a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1033,6 +1033,20 @@ default void modifyTable(TableDescriptor td) throws IOException { get(modifyTableAsync(td), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } + /** + * Truncate an individual region. + * @param regionName region to truncate + * @throws IOException if a remote or network exception occurs + */ + void truncateRegion(byte[] regionName) throws IOException; + + /** + * Truncate an individual region. Asynchronous operation. + * @param regionName region to truncate + * @throws IOException if a remote or network exception occurs + */ + Future truncateRegionAsync(byte[] regionName) throws IOException; + /** * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means * that it may be a while before your schema change is updated across all of the table. 
You can diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 690b6406fd3a..bb620aa3cdaa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -490,6 +490,16 @@ public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) throw return admin.splitRegion(regionName, splitPoint); } + @Override + public void truncateRegion(byte[] regionName) throws IOException { + get(admin.truncateRegion(regionName)); + } + + @Override + public Future truncateRegionAsync(byte[] regionName) { + return admin.truncateRegion(regionName); + } + @Override public Future modifyTableAsync(TableDescriptor td) throws IOException { return admin.modifyTable(td); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 960982f5e3f1..1097abbbf5e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -618,6 +618,12 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf */ CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint); + /** + * Truncate an individual region. + * @param regionName region to truncate + */ + CompletableFuture truncateRegion(byte[] regionName); + /** * Assign an individual region. * @param regionName Encoded or full name of region to assign. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 5ee8a6ab8269..0c7fd0f7b354 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -386,6 +386,11 @@ public CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint) return wrap(rawAdmin.splitRegion(regionName, splitPoint)); } + @Override + public CompletableFuture truncateRegion(byte[] regionName) { + return wrap(rawAdmin.truncateRegion(regionName)); + } + @Override public CompletableFuture assign(byte[] regionName) { return wrap(rawAdmin.assign(regionName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index ee1dfac16bd3..953dd2024767 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -1623,6 +1623,60 @@ private CompletableFuture split(final RegionInfo hri, byte[] splitPoint) { return future; } + @Override + public CompletableFuture truncateRegion(byte[] regionName) { + CompletableFuture future = new CompletableFuture<>(); + addListener(getRegionLocation(regionName), (location, err) -> { + if (err != null) { + future.completeExceptionally(err); + return; + } + RegionInfo regionInfo = location.getRegion(); + if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + future.completeExceptionally(new IllegalArgumentException( + "Can't truncate replicas directly.Replicas are auto-truncated " + + "when their primary is truncated.")); + return; + } + ServerName 
serverName = location.getServerName(); + if (serverName == null) { + future + .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + return; + } + addListener(truncateRegion(regionInfo), (ret, err2) -> { + if (err2 != null) { + future.completeExceptionally(err2); + } else { + future.complete(ret); + } + }); + }); + return future; + } + + private CompletableFuture truncateRegion(final RegionInfo hri) { + CompletableFuture future = new CompletableFuture<>(); + TableName tableName = hri.getTable(); + final MasterProtos.TruncateRegionRequest request; + try { + request = RequestConverter.buildTruncateRegionRequest(hri, ng.getNonceGroup(), ng.newNonce()); + } catch (DeserializationException e) { + future.completeExceptionally(e); + return future; + } + addListener(this.procedureCall(tableName, request, MasterService.Interface::truncateRegion, + MasterProtos.TruncateRegionResponse::getProcId, + new TruncateRegionProcedureBiConsumer(tableName)), (ret, err2) -> { + if (err2 != null) { + future.completeExceptionally(err2); + } else { + future.complete(ret); + } + }); + return future; + } + @Override public CompletableFuture assign(byte[] regionName) { CompletableFuture future = new CompletableFuture<>(); @@ -2882,6 +2936,18 @@ String getOperationType() { } } + private static class TruncateRegionProcedureBiConsumer extends TableProcedureBiConsumer { + + TruncateRegionProcedureBiConsumer(TableName tableName) { + super(tableName); + } + + @Override + String getOperationType() { + return "TRUNCATE_REGION"; + } + } + private static class SnapshotProcedureBiConsumer extends TableProcedureBiConsumer { SnapshotProcedureBiConsumer(TableName tableName) { super(tableName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 33884158da48..c29aacfc5ee1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -989,6 +989,17 @@ public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionI return builder.build(); } + public static MasterProtos.TruncateRegionRequest + buildTruncateRegionRequest(final RegionInfo regionInfo, final long nonceGroup, final long nonce) + throws DeserializationException { + MasterProtos.TruncateRegionRequest.Builder builder = + MasterProtos.TruncateRegionRequest.newBuilder(); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo)); + builder.setNonceGroup(nonceGroup); + builder.setNonce(nonce); + return builder.build(); + } + /** * Create a protocol buffer AssignRegionRequest * @return an AssignRegionRequest diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 5d715fdcdd16..f66f3b983668 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -137,6 +137,16 @@ message SplitTableRegionResponse { optional uint64 proc_id = 1; } +message TruncateRegionRequest { + required RegionInfo region_info = 1; + optional uint64 nonce_group = 2 [default = 0]; + optional uint64 nonce = 3 [default = 0]; +} + +message TruncateRegionResponse { + optional uint64 proc_id = 1; +} + message CreateTableRequest { required TableSchema table_schema = 1; repeated bytes 
split_keys = 2; @@ -864,6 +874,12 @@ service MasterService { rpc SplitRegion(SplitTableRegionRequest) returns(SplitTableRegionResponse); + /** + * Truncate region + */ + rpc TruncateRegion(TruncateRegionRequest) + returns(TruncateRegionResponse); + /** Deletes a table */ rpc DeleteTable(DeleteTableRequest) returns(DeleteTableResponse); diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 3f3ecd63b002..7d5ed9d714ec 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -102,6 +102,14 @@ message TruncateTableStateData { repeated RegionInfo region_info = 5; } +enum TruncateRegionState { + TRUNCATE_REGION_PRE_OPERATION = 1; + TRUNCATE_REGION_MAKE_OFFLINE = 2; + TRUNCATE_REGION_REMOVE = 3; + TRUNCATE_REGION_MAKE_ONLINE = 4; + TRUNCATE_REGION_POST_OPERATION = 5; +} + enum DeleteTableState { DELETE_TABLE_PRE_OPERATION = 1; DELETE_TABLE_REMOVE_FROM_META = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 175ff25e7611..820fef71fd07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -585,6 +585,46 @@ default void preSplitRegionAction(final ObserverContext c, + final RegionInfo regionInfo) { + } + + /** + * Called before the truncate region procedure is called. + * @param c The environment to interact with the framework and master + * @param regionInfo The Region being truncated + */ + @SuppressWarnings("unused") + default void preTruncateRegion(final ObserverContext c, + RegionInfo regionInfo) { + } + + /** + * Called after the truncate region procedure is called. + * @param c The environment to interact with the framework and master + * @param regionInfo The Region being truncated + */ + @SuppressWarnings("unused") + default void postTruncateRegion(final ObserverContext c, + RegionInfo regionInfo) { + } + + /** + * Called post the region is truncated. + * @param c The environment to interact with the framework and master + * @param regionInfo The Region To be truncated + */ + @SuppressWarnings("unused") + default void postTruncateRegionAction(final ObserverContext c, + final RegionInfo regionInfo) { + } + /** * Called after the region is split. 
* @param c the environment to interact with the framework and master diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index b0862a090760..3c433f11a689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -169,6 +169,7 @@ import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure; import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; @@ -2567,6 +2568,36 @@ protected String getDescription() { }); } + @Override + public long truncateRegion(final RegionInfo regionInfo, final long nonceGroup, final long nonce) + throws IOException { + checkInitialized(); + + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preTruncateRegion(regionInfo); + + LOG.info( + getClientIdAuditPrefix() + " truncate region " + regionInfo.getRegionNameAsString()); + + // Execute the operation asynchronously + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); + submitProcedure( + new TruncateRegionProcedure(procedureExecutor.getEnvironment(), regionInfo, latch)); + latch.await(); + + getMaster().getMasterCoprocessorHost().postTruncateRegion(regionInfo); + } + + @Override + protected String getDescription() { + return "TruncateRegionProcedure"; + } + }); + } + @Override public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 493d0e3ef864..3af69b362609 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -856,6 +856,60 @@ public void call(MasterObserver observer) throws IOException { }); } + /** + * Invoked just before calling the truncate region procedure + * @param regionInfo region being truncated + */ + public void preTruncateRegion(RegionInfo regionInfo) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) { + observer.preTruncateRegion(this, regionInfo); + } + }); + } + + /** + * Invoked after calling the truncate region procedure + * @param regionInfo region being truncated + */ + public void postTruncateRegion(RegionInfo regionInfo) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) { + observer.postTruncateRegion(this, regionInfo); + } + }); + } + + /** + * Invoked just before calling the truncate region procedure + * @param region Region to be truncated + * @param user The user + */ + public void preTruncateRegionAction(final RegionInfo region, User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { + @Override + public void call(MasterObserver observer) throws IOException { + observer.preTruncateRegionAction(this, region); + } + }); + } + + /** + * Invoked after calling the truncate region procedure + * @param region Region which was truncated + * @param user The user + */ + public void postTruncateRegionAction(final RegionInfo region, User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { + @Override + public void call(MasterObserver observer) throws IOException { + observer.postTruncateRegionAction(this, region); + } + }); + } + /** * This will be called before update META step as part of split table region procedure. * @param user the user diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index b6a17d8503b2..736fbae0dea9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -958,6 +958,18 @@ public SplitTableRegionResponse splitRegion(final RpcController controller, } } + @Override + public MasterProtos.TruncateRegionResponse truncateRegion(RpcController controller, + final MasterProtos.TruncateRegionRequest request) throws ServiceException { + try { + long procId = server.truncateRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()), + request.getNonceGroup(), request.getNonce()); + return MasterProtos.TruncateRegionResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + @Override public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller, final ClientProtos.CoprocessorServiceRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 933bf0d18150..2a244cb3aa47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -488,4 +488,13 @@ boolean normalizeRegions(final NormalizeTableFilterParams ntfp, final boolean is */ long flushTable(final TableName tableName, final List columnFamilies, final long nonceGroup, final long nonce) throws IOException; + + /** + * Truncate region + * @param regionInfo region to be truncated + * @param nonceGroup the nonce group + * @param nonce the nonce + * @return procedure Id + */ + long truncateRegion(RegionInfo regionInfo, long nonceGroup, long nonce) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 789a6e2ca89d..804757959d5c 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; @@ -1082,6 +1083,11 @@ public SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToS return new SplitTableRegionProcedure(getProcedureEnvironment(), regionToSplit, splitKey); } + public TruncateRegionProcedure createTruncateRegionProcedure(final RegionInfo regionToTruncate) + throws IOException { + return new TruncateRegionProcedure(getProcedureEnvironment(), regionToTruncate); + } + public MergeTableRegionsProcedure createMergeProcedure(RegionInfo... ris) throws IOException { return new MergeTableRegionsProcedure(getProcedureEnvironment(), ris, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java index fe98a78b4d74..cf886e9824b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java @@ -41,6 +41,12 @@ protected AbstractStateMachineRegionProcedure(MasterProcedureEnv env, RegionInfo this.hri = hri; } + protected AbstractStateMachineRegionProcedure(MasterProcedureEnv env, RegionInfo hri, + ProcedurePrepareLatch latch) { + super(env, latch); + this.hri = hri; + } + protected AbstractStateMachineRegionProcedure() { // Required by the Procedure framework to create the procedure on replay super(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 1ca5b17ac21f..00b9776366d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -49,7 +49,8 @@ public enum TableOperationType { REGION_ASSIGN, REGION_UNASSIGN, REGION_GC, - MERGED_REGIONS_GC/* region operations */ + MERGED_REGIONS_GC/* region operations */, + REGION_TRUNCATE } /** Returns the name of the table the procedure is operating on */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index d1acd08ea21c..2f0cec77e18c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -69,6 +69,7 @@ private static boolean requireTableExclusiveLock(TableProcedureInterface proc) { case REGION_GC: case MERGED_REGIONS_GC: case REGION_SNAPSHOT: + case REGION_TRUNCATE: return false; default: break; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java new file mode 100644 index 000000000000..5e907c1681ac --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateRegionState; + +@InterfaceAudience.Private +public class TruncateRegionProcedure + extends AbstractStateMachineRegionProcedure { + private static final Logger LOG = LoggerFactory.getLogger(TruncateRegionProcedure.class); + + @SuppressWarnings("unused") + public TruncateRegionProcedure() { + // Required by the Procedure framework to create the procedure on replay + super(); + } + + public TruncateRegionProcedure(final MasterProcedureEnv env, final RegionInfo hri) + throws HBaseIOException { + super(env, hri); + checkOnline(env, getRegion()); + } + + public TruncateRegionProcedure(final MasterProcedureEnv env, final RegionInfo region, + ProcedurePrepareLatch latch) throws HBaseIOException { + super(env, region, latch); + preflightChecks(env, true); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, TruncateRegionState state) + throws InterruptedException { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case TRUNCATE_REGION_PRE_OPERATION: + if (!prepareTruncate()) { + assert isFailed() : "the truncate should have an exception here"; + return Flow.NO_MORE_STATE; + } + checkOnline(env, getRegion()); + assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() + : "Can't truncate replicas directly. 
" + + "Replicas are auto-truncated when their primary is truncated."; + preTruncate(env); + setNextState(TruncateRegionState.TRUNCATE_REGION_MAKE_OFFLINE); + break; + case TRUNCATE_REGION_MAKE_OFFLINE: + addChildProcedure(createUnAssignProcedures(env)); + setNextState(TruncateRegionState.TRUNCATE_REGION_REMOVE); + break; + case TRUNCATE_REGION_REMOVE: + deleteRegionFromFileSystem(env); + setNextState(TruncateRegionState.TRUNCATE_REGION_MAKE_ONLINE); + break; + case TRUNCATE_REGION_MAKE_ONLINE: + addChildProcedure(createAssignProcedures(env)); + setNextState(TruncateRegionState.TRUNCATE_REGION_POST_OPERATION); + break; + case TRUNCATE_REGION_POST_OPERATION: + postTruncate(env); + LOG.debug("truncate '" + getTableName() + "' completed"); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + if (isRollbackSupported(state)) { + setFailure("master-truncate-region", e); + } else { + LOG.warn("Retriable error trying to truncate region=" + getRegion().getRegionNameAsString() + + " state=" + state, e); + } + } + return Flow.HAS_MORE_STATE; + } + + private void deleteRegionFromFileSystem(final MasterProcedureEnv env) throws IOException { + RegionStateNode regionNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); + try { + regionNode.lock(); + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); + HRegionFileSystem.deleteRegionFromFileSystem(env.getMasterConfiguration(), + mfs.getFileSystem(), tableDir, getRegion()); + } finally { + regionNode.unlock(); + } + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final TruncateRegionState state) + throws IOException { + if (state == TruncateRegionState.TRUNCATE_REGION_PRE_OPERATION) { + // Nothing to rollback, pre-truncate is just table-state checks. + return; + } + if (state == TruncateRegionState.TRUNCATE_REGION_MAKE_OFFLINE) { + RegionStateNode regionNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); + if (regionNode == null) { + // Region was unassigned by state TRUNCATE_REGION_MAKE_OFFLINE. + // So Assign it back + addChildProcedure(createAssignProcedures(env)); + } + return; + } + // The truncate doesn't have a rollback. The execution will succeed, at some point. 
+ throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected void completionCleanup(final MasterProcedureEnv env) { + releaseSyncLatch(); + } + + @Override + protected boolean isRollbackSupported(final TruncateRegionState state) { + switch (state) { + case TRUNCATE_REGION_PRE_OPERATION: + return true; + case TRUNCATE_REGION_MAKE_OFFLINE: + return true; + default: + return false; + } + } + + @Override + protected TruncateRegionState getState(final int stateId) { + return TruncateRegionState.forNumber(stateId); + } + + @Override + protected int getStateId(final TruncateRegionState state) { + return state.getNumber(); + } + + @Override + protected TruncateRegionState getInitialState() { + return TruncateRegionState.TRUNCATE_REGION_PRE_OPERATION; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (region="); + sb.append(getRegion().getRegionNameAsString()); + sb.append(")"); + } + + private boolean prepareTruncate() throws IOException { + if (getTableName().equals(TableName.META_TABLE_NAME)) { + throw new IOException("Can't truncate region in catalog tables"); + } + return true; + } + + private void preTruncate(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.preTruncateRegionAction(getRegion(), getUser()); + } + } + + private void postTruncate(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.postTruncateRegionAction(getRegion(), getUser()); + } + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.REGION_TRUNCATE; + } + + private TransitRegionStateProcedure createUnAssignProcedures(MasterProcedureEnv env) + throws IOException { + return env.getAssignmentManager().createOneUnassignProcedure(getRegion(), true); + } + + private TransitRegionStateProcedure createAssignProcedures(MasterProcedureEnv env) { + return env.getAssignmentManager().createOneAssignProcedure(getRegion(), true); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index 685cd00da52a..61dd87007c11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -22,10 +22,12 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -37,6 +39,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -283,4 +286,85 @@ private void splitTest(TableName tableName, int rowCount, 
boolean isSplitRegion, } assertEquals(2, count); } + + @Test + public void testTruncateRegion() throws Exception { + // Arrange - Create table, insert data, identify region to truncate. + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("30"), Bytes.toBytes("60"), Bytes.toBytes("90") }; + String family1 = "f1"; + String family2 = "f2"; + + final String[] sFamilies = new String[] { family1, family2 }; + final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; + createTableWithDefaultConf(tableName, splitKeys, bFamilies); + + AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + List regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); + RegionInfo regionToBeTruncated = regionLocations.get(0).getRegion(); + + assertEquals(4, regionLocations.size()); + + AssignmentTestingUtil.insertData(TEST_UTIL, tableName, 2, 21, sFamilies); + AssignmentTestingUtil.insertData(TEST_UTIL, tableName, 2, 31, sFamilies); + AssignmentTestingUtil.insertData(TEST_UTIL, tableName, 2, 61, sFamilies); + AssignmentTestingUtil.insertData(TEST_UTIL, tableName, 2, 91, sFamilies); + int rowCountBeforeTruncate = TEST_UTIL.countRows(tableName); + + // Act - Truncate the first region + admin.truncateRegion(regionToBeTruncated.getRegionName()).get(); + + // Assert + int rowCountAfterTruncate = TEST_UTIL.countRows(tableName); + assertNotEquals(rowCountBeforeTruncate, rowCountAfterTruncate); + int expectedRowCount = rowCountBeforeTruncate - 2;// Since region with 2 rows was truncated. + assertEquals(expectedRowCount, rowCountAfterTruncate); + } + + @Test + public void testTruncateReplicaRegionNotAllowed() throws Exception { + // Arrange - Create table, insert data, identify region to truncate. 
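The test below drives the new async API; for comparison, a minimal sketch of the same call through the blocking Admin method added in this patch, assuming an existing Connection conn and TableName tableName:

    try (Admin admin = conn.getAdmin();
      RegionLocator locator = conn.getRegionLocator(tableName)) {
      byte[] regionName =
        locator.getAllRegionLocations().get(0).getRegion().getRegionName();
      // Blocks until the master-side TruncateRegionProcedure finishes.
      admin.truncateRegion(regionName);
    }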
+ final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("30"), Bytes.toBytes("60"), Bytes.toBytes("90") }; + String family1 = "f1"; + String family2 = "f2"; + + final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; + createTableWithDefaultConf(tableName, 2, splitKeys, bFamilies); + + AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + List regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); + RegionInfo primaryRegion = regionLocations.get(0).getRegion(); + + RegionInfo firstReplica = RegionReplicaUtil.getRegionInfoForReplica(primaryRegion, 1); + + // Act - Truncate the first region + try { + admin.truncateRegion(firstReplica.getRegionName()).get(); + } catch (Exception e) { + // Assert + assertEquals("Expected message is different", + "Can't truncate replicas directly.Replicas are auto-truncated " + + "when their primary is truncated.", + e.getCause().getMessage()); + } + } + + @Test + public void testTruncateRegionsMetaTableRegionsNotAllowed() throws Exception { + AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(META_TABLE_NAME); + List regionLocations = locator.getAllRegionLocations().get(); + HRegionLocation regionToBeTruncated = regionLocations.get(0); + // 1 + try { + admin.truncateRegion(regionToBeTruncated.getRegion().getRegionName()).get(); + fail(); + } catch (ExecutionException e) { + // expected + assertThat(e.getCause(), instanceOf(IOException.class)); + assertEquals("Can't truncate region in catalog tables", e.getCause().getMessage()); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index a19b6ffbec64..7faa7750cdff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -79,6 +79,12 @@ public void checkTableModifiable(TableName tableName) throws IOException { // no-op } + @Override + public long truncateRegion(RegionInfo regionInfo, long nonceGroup, long nonce) + throws IOException { + return 0; + } + @Override public long createTable(final TableDescriptor desc, final byte[][] splitKeys, final long nonceGroup, final long nonce) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateRegionProcedure.java new file mode 100644 index 000000000000..8ce60ee5550a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateRegionProcedure.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil.insertData; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.assignment.TestSplitTableRegionProcedure; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; + +@SuppressWarnings("OptionalGetWithoutIsPresent") +@Category({ MasterTests.class, LargeTests.class }) +public class TestTruncateRegionProcedure extends TestTableDDLProcedureBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTruncateRegionProcedure.class); + private static final Logger LOG = LoggerFactory.getLogger(TestTruncateRegionProcedure.class); + + @Rule + public TestName name = new TestName(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0); + conf.set("hbase.coprocessor.region.classes", + TestSplitTableRegionProcedure.RegionServerHostingReplicaSlowOpenCopro.class.getName()); + conf.setInt("hbase.client.sync.wait.timeout.msec", 60000); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(3); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + + // Turn off balancer, so it doesn't cut in and mess up our placements. + UTIL.getAdmin().balancerSwitch(false, true); + // Turn off the meta scanner, so it doesn't remove, parent on us. 
+ UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (TableDescriptor htd : UTIL.getAdmin().listTableDescriptors()) { + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test + public void testTruncateRegionProcedure() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // Arrange - Load table and prepare arguments values. + final TableName tableName = TableName.valueOf(name.getMethodName()); + final String[] families = new String[] { "f1", "f2" }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("30"), Bytes.toBytes("60"), Bytes.toBytes("90") }; + + MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, families); + + insertData(UTIL, tableName, 2, 20, families); + insertData(UTIL, tableName, 2, 31, families); + insertData(UTIL, tableName, 2, 61, families); + insertData(UTIL, tableName, 2, 91, families); + + assertEquals(8, UTIL.countRows(tableName)); + + int rowsBeforeDropRegion = 8; + + MasterProcedureEnv environment = procExec.getEnvironment(); + RegionInfo regionToBeTruncated = environment.getAssignmentManager().getAssignedRegions() + .stream().filter(r -> tableName.getNameAsString().equals(r.getTable().getNameAsString())) + .min((o1, o2) -> Bytes.compareTo(o1.getStartKey(), o2.getStartKey())).get(); + + // Act - Execute Truncate region procedure + long procId = + procExec.submitProcedure(new TruncateRegionProcedure(environment, regionToBeTruncated)); + ProcedureTestingUtility.waitProcedure(procExec, procId); + assertEquals(8 - 2, UTIL.countRows(tableName)); + + int rowsAfterDropRegion = UTIL.countRows(tableName); + assertTrue("Row counts after truncate region should be less than row count before it", + rowsAfterDropRegion < rowsBeforeDropRegion); + assertEquals(rowsBeforeDropRegion, rowsAfterDropRegion + 2); + + insertData(UTIL, tableName, 2, 20, families); + assertEquals(8, UTIL.countRows(tableName)); + } + + @Test + public void testTruncateRegionProcedureErrorWhenSpecifiedReplicaRegionID() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // Arrange - Load table and prepare arguments values. 
+ final TableName tableName = TableName.valueOf(name.getMethodName()); + final String[] families = new String[] { "f1", "f2" }; + createTable(tableName, families, 2); + insertData(UTIL, tableName, 2, 20, families); + insertData(UTIL, tableName, 2, 30, families); + insertData(UTIL, tableName, 2, 60, families); + + assertEquals(6, UTIL.countRows(tableName)); + + MasterProcedureEnv environment = procExec.getEnvironment(); + RegionInfo regionToBeTruncated = environment.getAssignmentManager().getAssignedRegions() + .stream().filter(r -> tableName.getNameAsString().equals(r.getTable().getNameAsString())) + .min((o1, o2) -> Bytes.compareTo(o1.getStartKey(), o2.getStartKey())).get(); + + RegionInfo replicatedRegionId = + RegionReplicaUtil.getRegionInfoForReplica(regionToBeTruncated, 1); + + // Act - Execute Truncate region procedure + long procId = + procExec.submitProcedure(new TruncateRegionProcedure(environment, replicatedRegionId)); + + ProcedureTestingUtility.waitProcedure(procExec, procId); + Procedure result = procExec.getResult(procId); + // Asserts + + assertEquals(ProcedureProtos.ProcedureState.ROLLEDBACK, result.getState()); + assertTrue(result.getException().getMessage() + .endsWith("Can't truncate replicas directly. Replicas are auto-truncated " + + "when their primary is truncated.")); + } + + private TableDescriptor tableDescriptor(final TableName tableName, String[] families, + final int replicaCount) { + return TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount) + .setColumnFamilies(columnFamilyDescriptor(families)).build(); + } + + private List columnFamilyDescriptor(String[] families) { + return Arrays.stream(families).map(ColumnFamilyDescriptorBuilder::of) + .collect(Collectors.toList()); + } + + @SuppressWarnings("SameParameterValue") + private void createTable(final TableName tableName, String[] families, final int replicaCount) + throws IOException { + UTIL.getAdmin().createTable(tableDescriptor(tableName, families, replicaCount)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 9b1d8524d003..02087fb0a661 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -412,6 +412,16 @@ public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) throw return admin.splitRegionAsync(regionName, splitPoint); } + @Override + public void truncateRegion(byte[] regionName) throws IOException { + admin.truncateRegion(regionName); + } + + @Override + public Future truncateRegionAsync(byte[] regionName) throws IOException { + return admin.truncateRegionAsync(regionName); + } + public Future modifyTableAsync(TableDescriptor td) throws IOException { return admin.modifyTableAsync(td); } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 7477b8ec164f..53a8137fc0cb 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -196,6 +196,16 @@ def split(table_or_region_name, split_point = nil) end end + #---------------------------------------------------------------------------------------------- + # Requests a region truncate + def truncate_region(region_name) + begin + org.apache.hadoop.hbase.util.FutureUtils.get(@admin.truncateRegionAsync(region_name.to_java_bytes)) + rescue 
java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException + @admin.truncate_region(region_name.to_java_bytes) + end + end + #---------------------------------------------------------------------------------------------- # Enable/disable one split or merge switch # Returns previous switch setting. diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 1d579319a5ca..414ab9d2bd51 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -487,6 +487,7 @@ def self.exception_handler(hide_traceback) list_decommissioned_regionservers decommission_regionservers recommission_regionserver + truncate_region ], # TODO: remove older hlog_roll command aliases: { diff --git a/hbase-shell/src/main/ruby/shell/commands/truncate_region.rb b/hbase-shell/src/main/ruby/shell/commands/truncate_region.rb new file mode 100644 index 000000000000..b443d3210bc4 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/truncate_region.rb @@ -0,0 +1,36 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class TruncateRegion < Command + def help + <<-EOF +Truncate individual region. 
+Examples: + truncate_region 'REGIONNAME' + truncate_region 'ENCODED_REGIONNAME' +EOF + end + def command(region_name) + admin.truncate_region(region_name) + end + end + end +end diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 99fb27e0f769..4efcbb112765 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -168,6 +168,17 @@ def teardown #------------------------------------------------------------------------------- + define_test "truncate region should work" do + @t_name = 'hbase_shell_truncate_region' + drop_test_table(@t_name) + admin.create(@t_name, 'a', NUMREGIONS => 10, SPLITALGO => 'HexStringSplit') + r1 = command(:locate_region, @t_name, '1') + region1 = r1.getRegion.getRegionNameAsString + command(:truncate_region, region1) + end + + #------------------------------------------------------------------------------- + define_test "drop should fail on non-existent tables" do assert_raise(ArgumentError) do command(:drop, 'NOT.EXISTS') diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 1b7b6938524a..70ce37faf47e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -725,6 +725,16 @@ public void split(TableName tableName, byte[] splitPoint) { throw new NotImplementedException("split not supported in ThriftAdmin"); } + @Override + public void truncateRegion(byte[] regionName) throws IOException { + throw new NotImplementedException("Truncate Region not supported in ThriftAdmin"); + } + + @Override + public Future truncateRegionAsync(byte[] regionName) { + throw new NotImplementedException("Truncate Region Async not supported in ThriftAdmin"); + } + @Override public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) { throw new NotImplementedException("splitRegionAsync not supported in ThriftAdmin"); From 41057bbf357e308f7f65887c2e5fd92931276119 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 24 Oct 2023 09:30:11 +0100 Subject: [PATCH 118/514] HBASE-28170 Put the cached time at the beginning of the block; run cache validation in the background when retrieving the persistent cache (#5471) Signed-off-by: Peter Somogyi --- .../hbase/io/hfile/HFilePreadReader.java | 39 +++--- .../hbase/io/hfile/bucket/BucketCache.java | 100 ++++++++++---- .../io/hfile/bucket/BucketCachePersister.java | 41 ++++-- .../hbase/io/hfile/bucket/FileIOEngine.java | 16 +-- .../io/hfile/TestPrefetchWithBucketCache.java | 2 +- .../TestRecoveryPersistentBucketCache.java | 126 ++++++++++++++++++ .../bucket/TestVerifyBucketCacheFile.java | 12 +- 7 files changed, 266 insertions(+), 70 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index f9c0ae592424..1ac9a4ffb842 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -40,7 +40,9 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c 
Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); final MutableBoolean fileAlreadyCached = new MutableBoolean(false); - BucketCache.getBuckedCacheFromCacheConfig(cacheConf).ifPresent(bc -> fileAlreadyCached + Optional bucketCacheOptional = + BucketCache.getBucketCacheFromCacheConfig(cacheConf); + bucketCacheOptional.ifPresent(bc -> fileAlreadyCached .setValue(bc.getFullyCachedFiles().get(path.getName()) == null ? false : true)); // Prefetch file blocks upon open if requested if ( @@ -65,8 +67,6 @@ public void run() { if (LOG.isTraceEnabled()) { LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end)); } - Optional bucketCacheOptional = - BucketCache.getBuckedCacheFromCacheConfig(cacheConf); // Don't use BlockIterator here, because it's designed to read load-on-open section. long onDiskSizeOfNextBlock = -1; while (offset < end) { @@ -78,21 +78,24 @@ public void run() { // to the next block without actually going read all the way to the cache. if (bucketCacheOptional.isPresent()) { BucketCache cache = bucketCacheOptional.get(); - BlockCacheKey cacheKey = new BlockCacheKey(name, offset); - BucketEntry entry = cache.getBackingMap().get(cacheKey); - if (entry != null) { - cacheKey = new BlockCacheKey(name, offset); - entry = cache.getBackingMap().get(cacheKey); - if (entry == null) { - LOG.debug("No cache key {}, we'll read and cache it", cacheKey); + if (cache.getBackingMapValidated().get()) { + BlockCacheKey cacheKey = new BlockCacheKey(name, offset); + BucketEntry entry = cache.getBackingMap().get(cacheKey); + if (entry != null) { + cacheKey = new BlockCacheKey(name, offset); + entry = cache.getBackingMap().get(cacheKey); + if (entry == null) { + LOG.debug("No cache key {}, we'll read and cache it", cacheKey); + } else { + offset += entry.getOnDiskSizeWithHeader(); + LOG.debug( + "Found cache key {}. Skipping prefetch, the block is already cached.", + cacheKey); + continue; + } } else { - offset += entry.getOnDiskSizeWithHeader(); - LOG.debug("Found cache key {}. Skipping prefetch, the block is already cached.", - cacheKey); - continue; + LOG.debug("No entry in the backing map for cache key {}", cacheKey); } - } else { - LOG.debug("No entry in the backing map for cache key {}", cacheKey); } } // Perhaps we got our block from cache? Unlikely as this may be, if it happens, then @@ -111,9 +114,7 @@ public void run() { block.release(); } } - BucketCache.getBuckedCacheFromCacheConfig(cacheConf) - .ifPresent(bc -> bc.fileCacheCompleted(path.getName())); - + bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path.getName())); } catch (IOException e) { // IOExceptions are probably due to region closes (relocation, etc.) 
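Condensing the prefetch loop above, the skip decision amounts to the following hedged sketch; shouldSkipPrefetch is a hypothetical name, and the real inlined check also advances the offset by the cached entry's on-disk size:

    // A block is skipped only when the recovered backing map has finished validating and
    // already holds an entry for this exact (file, offset) key; otherwise it is read and cached.
    private static boolean shouldSkipPrefetch(BucketCache cache, String hfileName, long offset) {
      if (!cache.getBackingMapValidated().get()) {
        return false;
      }
      return cache.getBackingMap().get(new BlockCacheKey(hfileName, offset)) != null;
    }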
if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index bc5e7e7c9b9a..c082273b53b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -149,6 +149,8 @@ public class BucketCache implements BlockCache, HeapSize { // In this map, store the block's meta data like offset, length transient Map backingMap; + private AtomicBoolean backingMapValidated = new AtomicBoolean(false); + /** Set of files for which prefetch is completed */ final Map fullyCachedFiles = new ConcurrentHashMap<>(); @@ -312,7 +314,6 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck this.allocFailLogPrevTs = 0; - bucketAllocator = new BucketAllocator(capacity, bucketSizes); for (int i = 0; i < writerThreads.length; ++i) { writerQueues.add(new ArrayBlockingQueue<>(writerQLen)); } @@ -329,10 +330,14 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck try { retrieveFromFile(bucketSizes); } catch (IOException ioex) { + LOG.error("Can't restore from file[{}] because of ", persistencePath, ioex); backingMap.clear(); fullyCachedFiles.clear(); - LOG.error("Can't restore from file[" + persistencePath + "] because of ", ioex); + backingMapValidated.set(true); + bucketAllocator = new BucketAllocator(capacity, bucketSizes); } + } else { + bucketAllocator = new BucketAllocator(capacity, bucketSizes); } final String threadName = Thread.currentThread().getName(); this.cacheEnabled = true; @@ -385,6 +390,7 @@ protected void startWriterThreads() { } void startBucketCachePersisterThread() { + LOG.info("Starting BucketCachePersisterThread"); cachePersister = new BucketCachePersister(this, bucketcachePersistInterval); cachePersister.setDaemon(true); cachePersister.start(); @@ -540,6 +546,7 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach } else { this.blockNumber.increment(); this.heapSize.add(cachedItem.heapSize()); + blocksByHFile.add(cacheKey); } } @@ -600,6 +607,7 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, // the cache map state might differ from the actual cache. 
If we reach this block, // we should remove the cache key entry from the backing map backingMap.remove(key); + fullyCachedFiles.remove(key.getHfileName()); LOG.debug("Failed to fetch block for cache key: {}.", key, hioex); } catch (IOException ioex) { LOG.error("Failed reading block " + key + " from bucket cache", ioex); @@ -695,6 +703,7 @@ private boolean doEvictBlock(BlockCacheKey cacheKey, BucketEntry bucketEntry, } else { return bucketEntryToUse.withWriteLock(offsetLock, () -> { if (backingMap.remove(cacheKey, bucketEntryToUse)) { + LOG.debug("removed key {} from back map in the evict process", cacheKey); blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, evictedByEvictionProcess); return true; } @@ -1266,6 +1275,8 @@ static List getRAMQueueEntries(BlockingQueue q, @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.") void persistToFile() throws IOException { + LOG.debug("Thread {} started persisting bucket cache to file", + Thread.currentThread().getName()); if (!isCachePersistent()) { throw new IOException("Attempt to persist non-persistent cache mappings!"); } @@ -1273,14 +1284,19 @@ void persistToFile() throws IOException { try (FileOutputStream fos = new FileOutputStream(tempPersistencePath, false)) { fos.write(ProtobufMagic.PB_MAGIC); BucketProtoUtils.toPB(this).writeDelimitedTo(fos); + } catch (IOException e) { + LOG.error("Failed to persist bucket cache to file", e); + throw e; } + LOG.debug("Thread {} finished persisting bucket cache to file, renaming", + Thread.currentThread().getName()); if (!tempPersistencePath.renameTo(new File(persistencePath))) { LOG.warn("Failed to commit cache persistent file. We might lose cached blocks if " + "RS crashes/restarts before we successfully checkpoint again."); } } - private boolean isCachePersistent() { + public boolean isCachePersistent() { return ioEngine.isPersistent() && persistencePath != null; } @@ -1288,8 +1304,14 @@ private boolean isCachePersistent() { * @see #persistToFile() */ private void retrieveFromFile(int[] bucketSizes) throws IOException { + LOG.info("Started retrieving bucket cache from file"); File persistenceFile = new File(persistencePath); if (!persistenceFile.exists()) { + LOG.warn("Persistence file missing! " + + "It's ok if it's first run after enabling persistent cache."); + bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); + blockNumber.add(backingMap.size()); + backingMapValidated.set(true); return; } assert !cacheEnabled; @@ -1311,6 +1333,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); blockNumber.add(backingMap.size()); + LOG.info("Bucket cache retrieved from file successfully"); } } @@ -1383,27 +1406,43 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio try { ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), algorithm); + backingMapValidated.set(true); } catch (IOException e) { LOG.warn("Checksum for cache file failed. " - + "We need to validate each cache key in the backing map. 
This may take some time..."); - long startTime = EnvironmentEdgeManager.currentTime(); - int totalKeysOriginally = backingMap.size(); - for (Map.Entry keyEntry : backingMap.entrySet()) { - try { - ((FileIOEngine) ioEngine).checkCacheTime(keyEntry.getValue()); - } catch (IOException e1) { - LOG.debug("Check for key {} failed. Removing it from map.", keyEntry.getKey()); - backingMap.remove(keyEntry.getKey()); - fullyCachedFiles.remove(keyEntry.getKey().getHfileName()); + + "We need to validate each cache key in the backing map. " + + "This may take some time, so we'll do it in a background thread,"); + Runnable cacheValidator = () -> { + while (bucketAllocator == null) { + try { + Thread.sleep(50); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } } - } - LOG.info("Finished validating {} keys in the backing map. Recovered: {}. This took {}ms.", - totalKeysOriginally, backingMap.size(), - (EnvironmentEdgeManager.currentTime() - startTime)); + long startTime = EnvironmentEdgeManager.currentTime(); + int totalKeysOriginally = backingMap.size(); + for (Map.Entry keyEntry : backingMap.entrySet()) { + try { + ((FileIOEngine) ioEngine).checkCacheTime(keyEntry.getValue()); + } catch (IOException e1) { + LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey()); + evictBlock(keyEntry.getKey()); + fullyCachedFiles.remove(keyEntry.getKey().getHfileName()); + } + } + backingMapValidated.set(true); + LOG.info("Finished validating {} keys in the backing map. Recovered: {}. This took {}ms.", + totalKeysOriginally, backingMap.size(), + (EnvironmentEdgeManager.currentTime() - startTime)); + }; + Thread t = new Thread(cacheValidator); + t.setDaemon(true); + t.start(); } } else { // if has not checksum, it means the persistence file is old format LOG.info("Persistent file is old format, it does not support verifying file integrity!"); + backingMapValidated.set(true); } verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), proto.getMapClass()); } @@ -1432,6 +1471,7 @@ private void checkIOErrorIsTolerated() { */ private void disableCache() { if (!cacheEnabled) return; + LOG.info("Disabling cache"); cacheEnabled = false; ioEngine.shutdown(); this.scheduleThreadPool.shutdown(); @@ -1456,11 +1496,15 @@ public void shutdown() { LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write=" + persistencePath); if (ioEngine.isPersistent() && persistencePath != null) { - if (cachePersister != null) { - cachePersister.interrupt(); - } try { join(); + if (cachePersister != null) { + LOG.info("Shutting down cache persister thread."); + cachePersister.shutdown(); + while (cachePersister.isAlive()) { + Thread.sleep(10); + } + } persistToFile(); } catch (IOException ex) { LOG.error("Unable to persist data on exit: " + ex.toString(), ex); @@ -1665,17 +1709,17 @@ public BucketEntry writeToCache(final IOEngine ioEngine, final BucketAllocator a HFileBlock block = (HFileBlock) data; ByteBuff sliceBuf = block.getBufferReadOnly(); block.getMetaData(metaBuff); - ioEngine.write(sliceBuf, offset); - // adds the cache time after the block and metadata part + // adds the cache time prior to the block and metadata part if (isCachePersistent) { - ioEngine.write(metaBuff, offset + len - metaBuff.limit() - Long.BYTES); ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); buffer.putLong(bucketEntry.getCachedTime()); buffer.rewind(); - ioEngine.write(buffer, (offset + len - Long.BYTES)); + ioEngine.write(buffer, offset); + ioEngine.write(sliceBuf, (offset + 
Long.BYTES)); } else { - ioEngine.write(metaBuff, offset + len - metaBuff.limit()); + ioEngine.write(sliceBuf, offset); } + ioEngine.write(metaBuff, offset + len - metaBuff.limit()); } else { // Only used for testing. ByteBuffer bb = ByteBuffer.allocate(len); @@ -1917,11 +1961,15 @@ public Map getBackingMap() { return backingMap; } + public AtomicBoolean getBackingMapValidated() { + return backingMapValidated; + } + public Map getFullyCachedFiles() { return fullyCachedFiles; } - public static Optional getBuckedCacheFromCacheConfig(CacheConfig cacheConf) { + public static Optional getBucketCacheFromCacheConfig(CacheConfig cacheConf) { if (cacheConf.getBlockCache().isPresent()) { BlockCache bc = cacheConf.getBlockCache().get(); if (bc instanceof CombinedBlockCache) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCachePersister.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCachePersister.java index dbea4f3f325a..e4382d2561e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCachePersister.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCachePersister.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,8 @@ public class BucketCachePersister extends Thread { private final long intervalMillis; private static final Logger LOG = LoggerFactory.getLogger(BucketCachePersister.class); + private AtomicBoolean shutdown = new AtomicBoolean(false); + public BucketCachePersister(BucketCache cache, long intervalMillis) { super("bucket-cache-persister"); this.cache = cache; @@ -36,20 +39,34 @@ public BucketCachePersister(BucketCache cache, long intervalMillis) { } public void run() { - while (true) { - try { - Thread.sleep(intervalMillis); - if (cache.isCacheInconsistent()) { - LOG.debug("Cache is inconsistent, persisting to disk"); - cache.persistToFile(); - cache.setCacheInconsistent(false); + try { + while (true) { + try { + Thread.sleep(intervalMillis); + if (cache.isCacheInconsistent()) { + LOG.debug("Cache is inconsistent, persisting to disk"); + cache.persistToFile(); + cache.setCacheInconsistent(false); + } + // Thread.interrupt may cause an InterruptException inside util method used for checksum + // calculation in persistToFile. This util currently swallows the exception, causing this + // thread to net get interrupt, so we added this flag to indicate the persister thread + // should stop. 
+ if (shutdown.get()) { + break; + } + } catch (IOException e) { + LOG.warn("Exception in BucketCachePersister.", e); } - } catch (IOException e) { - LOG.warn("IOException in BucketCachePersister {} ", e.getMessage()); - } catch (InterruptedException iex) { - LOG.warn("InterruptedException in BucketCachePersister {} ", iex.getMessage()); - break; } + LOG.info("Finishing cache persister thread."); + } catch (InterruptedException e) { + LOG.warn("Interrupting BucketCachePersister thread.", e); } } + + public void shutdown() { + this.shutdown.set(true); + this.interrupt(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index 38f9db04b6d7..ba431c4c6dcb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -149,15 +149,14 @@ public Cacheable read(BucketEntry be) throws IOException { } } if (maintainPersistence) { - dstBuff.position(length - Long.BYTES); + dstBuff.rewind(); long cachedNanoTime = dstBuff.getLong(); if (be.getCachedTime() != cachedNanoTime) { dstBuff.release(); - throw new HBaseIOException("The cached time recorded within the cached block differs " - + "from its bucket entry, so it might not be the same."); + throw new HBaseIOException("The cached time recorded within the cached block: " + + cachedNanoTime + " differs from its bucket entry: " + be.getCachedTime()); } - dstBuff.rewind(); - dstBuff.limit(length - Long.BYTES); + dstBuff.limit(length); dstBuff = dstBuff.slice(); } else { dstBuff.rewind(); @@ -167,10 +166,9 @@ public Cacheable read(BucketEntry be) throws IOException { void checkCacheTime(BucketEntry be) throws IOException { long offset = be.offset(); - int length = be.getLength(); ByteBuff dstBuff = be.allocator.allocate(Long.BYTES); try { - accessFile(readAccessor, dstBuff, (offset + length - Long.BYTES)); + accessFile(readAccessor, dstBuff, offset); } catch (IOException ioe) { dstBuff.release(); throw ioe; @@ -179,8 +177,8 @@ void checkCacheTime(BucketEntry be) throws IOException { long cachedNanoTime = dstBuff.getLong(); if (be.getCachedTime() != cachedNanoTime) { dstBuff.release(); - throw new HBaseIOException("The cached time recorded within the cached block differs " - + "from its bucket entry, so it might not be the same."); + throw new HBaseIOException("The cached time recorded within the cached block: " + + cachedNanoTime + " differs from its bucket entry: " + be.getCachedTime()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index e4330308243d..e5c6b42fcc40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -106,7 +106,7 @@ public void testPrefetchDoesntOverwork() throws Exception { // Prefetches the file blocks LOG.debug("First read should prefetch the blocks."); readStoreFile(storeFile); - BucketCache bc = BucketCache.getBuckedCacheFromCacheConfig(cacheConf).get(); + BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get(); // Our file should have 6 DATA blocks. 
We should wait for all of them to be cached Waiter.waitFor(conf, 300, () -> bc.getBackingMap().size() == 6); Map snapshot = ImmutableMap.copyOf(bc.getBackingMap()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java new file mode 100644 index 000000000000..ad91d01f8cfd --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile.bucket; + +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY; +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.hadoop.hbase.io.hfile.CacheTestUtils; +import org.apache.hadoop.hbase.io.hfile.Cacheable; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Basic test for check file's integrity before start BucketCache in fileIOEngine + */ +@Category(SmallTests.class) +public class TestRecoveryPersistentBucketCache { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRecoveryPersistentBucketCache.class); + + final long capacitySize = 32 * 1024 * 1024; + final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS; + final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS; + + @Test + public void testBucketCacheRecovery() throws Exception { + HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + Path testDir = TEST_UTIL.getDataTestDir(); + TEST_UTIL.getTestFileSystem().mkdirs(testDir); + Configuration conf = HBaseConfiguration.create(); + // Disables the persister thread by setting its interval to MAX_VALUE + conf.setLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, Long.MAX_VALUE); + int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, conf); + + CacheTestUtils.HFileBlockPair[] blocks = 
CacheTestUtils.generateHFileBlocks(8192, 4); + + CacheTestUtils.HFileBlockPair[] smallerBlocks = CacheTestUtils.generateHFileBlocks(4096, 1); + // Add four blocks + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[0].getBlockName(), blocks[0].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[1].getBlockName(), blocks[1].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[2].getBlockName(), blocks[2].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[3].getBlockName(), blocks[3].getBlock()); + // saves the current state of the cache + bucketCache.persistToFile(); + // evicts the 4th block + bucketCache.evictBlock(blocks[3].getBlockName()); + // now adds a 5th block to bucket cache. This block is half the size of the previous + // blocks, and it will be added in the same offset of the previous evicted block. + // This overwrites part of the 4th block. Because we persisted only up to the + // 4th block addition, recovery would try to read the whole 4th block, but the cached time + // validation will fail, and we'll recover only the first three blocks + cacheAndWaitUntilFlushedToBucket(bucketCache, smallerBlocks[0].getBlockName(), + smallerBlocks[0].getBlock()); + + // Creates new bucket cache instance without persisting to file after evicting 4th block + // and caching 5th block. Here the cache file has the first three blocks, followed by the + // 5th block and the second half of 4th block (we evicted 4th block, freeing up its + // offset in the cache, then added 5th block which is half the size of other blocks, so it's + // going to override the first half of the 4th block in the cache). That's fine because + // the in-memory backing map has the right blocks and related offsets. However, the + // persistent map file only has information about the first four blocks. We validate the + // cache time recorded in the back map against the block data in the cache. This is recorded + // in the cache as the first 8 bytes of a block, so the 4th block had its first 8 blocks + // now overridden by the 5th block, causing this check to fail and removal of + // the 4th block from the backing map. + BucketCache newBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, conf); + Thread.sleep(100); + assertEquals(3, newBucketCache.backingMap.size()); + assertNull(newBucketCache.getBlock(blocks[3].getBlockName(), false, false, false)); + assertNull(newBucketCache.getBlock(smallerBlocks[0].getBlockName(), false, false, false)); + assertEquals(blocks[0].getBlock(), + newBucketCache.getBlock(blocks[0].getBlockName(), false, false, false)); + assertEquals(blocks[1].getBlock(), + newBucketCache.getBlock(blocks[1].getBlockName(), false, false, false)); + assertEquals(blocks[2].getBlock(), + newBucketCache.getBlock(blocks[2].getBlockName(), false, false, false)); + TEST_UTIL.cleanupTestDir(); + } + + private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey) + throws InterruptedException { + while (!cache.backingMap.containsKey(cacheKey) || cache.ramCache.containsKey(cacheKey)) { + Thread.sleep(100); + } + } + + // BucketCache.cacheBlock is async, it first adds block to ramCache and writeQueue, then writer + // threads will flush it to the bucket and put reference entry in backingMap. 
+ private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey, + Cacheable block) throws InterruptedException { + cache.cacheBlock(cacheKey, block); + waitUntilFlushedToBucket(cache, cacheKey); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java index 6fdea844aa32..0d33fb079bcd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java @@ -127,6 +127,7 @@ public void testRetrieveFromFile() throws Exception { bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + Thread.sleep(100); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); // Add blocks @@ -146,6 +147,7 @@ public void testRetrieveFromFile() throws Exception { bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + Thread.sleep(100); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -201,9 +203,12 @@ public void testModifiedBucketCacheFileData() throws Exception { Path testDir = TEST_UTIL.getDataTestDir(); TEST_UTIL.getTestFileSystem().mkdirs(testDir); - BucketCache bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + Configuration conf = HBaseConfiguration.create(); + // Disables the persister thread by setting its interval to MAX_VALUE + conf.setLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, Long.MAX_VALUE); + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, + testDir + "/bucket.persistence", DEFAULT_ERROR_TOLERATION_DURATION, conf); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); @@ -228,6 +233,7 @@ public void testModifiedBucketCacheFileData() throws Exception { bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + Thread.sleep(100); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); From 2399539510739ec87fe79948b429fa8eeeef0028 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Tue, 24 Oct 2023 10:25:31 -0400 Subject: [PATCH 119/514] HBASE-28017 Set request and response size metrics in NettyRpcDuplexHandler (#5473) Co-authored-by: Ray Mattingly Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 47b0b29a5c6e..ad8c51568a32 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -110,6 +110,7 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p } else { ctx.write(buf, promise); } + call.callStats.setRequestSizeBytes(totalSize); } } @@ -193,6 +194,7 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep } return; } + call.callStats.setResponseSizeBytes(totalSize); if (remoteExc != null) { call.setException(remoteExc); return; From 13d46e7f8cd488d9d352baef4eacb0d8681d0295 Mon Sep 17 00:00:00 2001 From: ZhangIssac <58984599+ZhangIssac@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:08:41 +0800 Subject: [PATCH 120/514] HBASE-28145 When specifying the wrong BoolFilter type while creating a table in HBase shell, the log prompt will report an error. (#5460) Co-authored-by: xiaozhang Signed-off-by: Duo Zhang --- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 53a8137fc0cb..453b7ae1af6e 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1182,7 +1182,7 @@ def cfd(arg, tdb) if org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype) cfdb.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype)) else - raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(' ')) + raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.BloomType.constants.join(' ')) end end if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION) From e473346e9b3ca34b5c1e7d16fca3789fd9428196 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 25 Oct 2023 16:32:53 -0700 Subject: [PATCH 121/514] HBASE-28157. hbck should report previously reported regions with null region location (#5463) Ensure that hbck will report as inconsistent regions where previously a location was reported but now the region location is null, if it is not expected to be offline. Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani Reviewed-by: Shanmukha Haripriya Kota --- .../org/apache/hadoop/hbase/master/hbck/HbckChore.java | 5 +---- .../src/main/resources/hbase-webapps/master/hbck.jsp | 4 ++++ .../hadoop/hbase/master/assignment/TestHbckChore.java | 7 +++++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java index 3a0fd15d6ead..75df2da5a71e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java @@ -230,10 +230,7 @@ private void loadRegionsFromRSReport(final HbckReport report) { for (Map.Entry entry : report.getRegionInfoMap().entrySet()) { HbckRegionInfo hri = entry.getValue(); - ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); - if (locationInMeta == null) { - continue; - } + ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); // can be null if (hri.getDeployedOn().size() == 0) { // skip the offline region which belong to disabled table. 
if (report.getDisabledTableRegions().contains(hri.getRegionNameAsString())) { diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 7a385a0a2a64..38e16ca8e28f 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -355,9 +355,13 @@ * If a live server reference, make it a link. * If dead, make it italic. * If unknown, make it plain. + * If null, make it "null". */ private static String formatServerName(HMaster master, ServerManager serverManager, ServerName serverName) { + if (serverName == null) { + return "null"; + } String sn = serverName.toString(); if (serverManager.isServerOnline(serverName)) { int infoPort = master.getRegionServerInfoPort(serverName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java index b99bdb0090de..70afeae4c6ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java @@ -150,6 +150,13 @@ public void testForUserTable() throws Exception { hbckChore.choreForTesting(); inconsistentRegions = hbckChore.getLastReport().getInconsistentRegions(); assertFalse(inconsistentRegions.containsKey(regionName)); + + // Test for case4: No region location for a previously reported region. Probably due to + // TRSP bug or bypass. + am.offlineRegion(hri); + hbckChore.choreForTesting(); + inconsistentRegions = hbckChore.getLastReport().getInconsistentRegions(); + assertTrue(inconsistentRegions.containsKey(regionName)); } @Test From 0749fad44894d4188402500441bdf27de38f6691 Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Fri, 27 Oct 2023 01:45:58 -0700 Subject: [PATCH 122/514] =?UTF-8?q?HBASE-28168=20Add=20option=20in=20Regio?= =?UTF-8?q?nMover.java=20to=20isolate=20one=20or=20more=20reg=E2=80=A6=20(?= =?UTF-8?q?#5470)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Wellington Chevreuil --- .../apache/hadoop/hbase/util/RegionMover.java | 182 +++++++++++++++++- .../hadoop/hbase/util/TestRegionMover2.java | 177 +++++++++++++++++ 2 files changed, 351 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 4de8ecc88c86..c1f98edd75ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -30,6 +30,7 @@ import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -53,6 +54,8 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; @@ -60,10 +63,16 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RackManager; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -96,6 +105,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private boolean ack = true; private int maxthreads = 1; private int timeout; + private List isolateRegionIdArray; private String loadUnload; private String hostname; private String filename; @@ -112,6 +122,7 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { this.excludeFile = builder.excludeFile; this.designatedFile = builder.designatedFile; this.maxthreads = builder.maxthreads; + this.isolateRegionIdArray = builder.isolateRegionIdArray; this.ack = builder.ack; this.port = builder.port; this.timeout = builder.timeout; @@ -156,6 +167,7 @@ public static class RegionMoverBuilder { private boolean ack = true; private int maxthreads = 1; private int timeout = Integer.MAX_VALUE; + private List isolateRegionIdArray = new ArrayList<>(); private String hostname; private String filename; private String excludeFile = null; @@ -216,6 +228,14 @@ public RegionMoverBuilder maxthreads(int threads) { return this; } + /** + * Set the region ID to isolate on the region server. + */ + public RegionMoverBuilder isolateRegionIdArray(List isolateRegionIdArray) { + this.isolateRegionIdArray = isolateRegionIdArray; + return this; + } + /** * Path of file containing hostnames to be excluded during region movement. Exclude file should * have 'host:port' per line. Port is mandatory here as we can have many RS running on a single @@ -409,6 +429,25 @@ public boolean unloadFromRack() } private boolean unloadRegions(boolean unloadFromRack) + throws ExecutionException, InterruptedException, TimeoutException { + return unloadRegions(unloadFromRack, null); + } + + /** + * Isolated regions specified in {@link #isolateRegionIdArray} on {@link #hostname} in ack Mode + * and Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. + * In noAck mode we do not make sure that region is successfully online on the target region + * server,hence it is the best effort. We do not unload regions to hostnames given in + * {@link #excludeFile}. 
If designatedFile is present with some contents, we will unload regions + * to hostnames provided in {@link #designatedFile} + * @return true if region isolation succeeded, false otherwise + */ + public boolean isolateRegions() + throws ExecutionException, InterruptedException, TimeoutException { + return unloadRegions(false, isolateRegionIdArray); + } + + private boolean unloadRegions(boolean unloadFromRack, List isolateRegionIdArray) throws InterruptedException, ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); @@ -466,7 +505,7 @@ private boolean unloadRegions(boolean unloadFromRack) } else { LOG.info("Available servers {}", regionServers); } - unloadRegions(server, regionServers, movedRegions); + unloadRegions(server, regionServers, movedRegions, isolateRegionIdArray); } catch (Exception e) { LOG.error("Error while unloading regions ", e); return false; @@ -497,9 +536,111 @@ Collection filterRSGroupServers(RSGroupInfo rsgroup, } private void unloadRegions(ServerName server, List regionServers, - List movedRegions) throws Exception { + List movedRegions, List isolateRegionIdArray) throws Exception { while (true) { + List isolateRegionInfoList = Collections.synchronizedList(new ArrayList<>()); + RegionInfo isolateRegionInfo = null; + if (isolateRegionIdArray != null && !isolateRegionIdArray.isEmpty()) { + // Region will be moved to target region server with Ack mode. + final ExecutorService isolateRegionPool = Executors.newFixedThreadPool(maxthreads); + List> isolateRegionTaskList = new ArrayList<>(); + List recentlyIsolatedRegion = Collections.synchronizedList(new ArrayList<>()); + boolean allRegionOpsSuccessful = true; + boolean isMetaIsolated = false; + RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO; + List hRegionLocationRegionIsolation = + Collections.synchronizedList(new ArrayList<>()); + for (String isolateRegionId : isolateRegionIdArray) { + if (isolateRegionId.equalsIgnoreCase(metaRegionInfo.getEncodedName())) { + isMetaIsolated = true; + continue; + } + Result result = MetaTableAccessor.scanByRegionEncodedName(conn, isolateRegionId); + HRegionLocation hRegionLocation = + MetaTableAccessor.getRegionLocation(conn, result.getRow()); + if (hRegionLocation != null) { + hRegionLocationRegionIsolation.add(hRegionLocation); + } else { + LOG.error("Region " + isolateRegionId + " doesn't exists/can't fetch from" + + " meta...Quitting now"); + // We only move the regions if all the regions were found. + allRegionOpsSuccessful = false; + break; + } + } + + if (!allRegionOpsSuccessful) { + break; + } + // If hbase:meta region was isolated, then it needs to be part of isolateRegionInfoList. + if (isMetaIsolated) { + ZKWatcher zkWatcher = new ZKWatcher(conf, null, null); + List result = new ArrayList<>(); + for (String znode : zkWatcher.getMetaReplicaNodes()) { + String path = ZNodePaths.joinZNode(zkWatcher.getZNodePaths().baseZNode, znode); + int replicaId = zkWatcher.getZNodePaths().getMetaReplicaIdFromPath(path); + RegionState state = MetaTableLocator.getMetaRegionState(zkWatcher, replicaId); + result.add(new HRegionLocation(state.getRegion(), state.getServerName())); + } + ServerName metaSeverName = result.get(0).getServerName(); + // For isolating hbase:meta, it should move explicitly in Ack mode, + // hence the forceMoveRegionByAck = true. 
+ if (!metaSeverName.equals(server)) { + LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " is on server " + + metaSeverName + " moving to " + server); + submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server), + movedRegions, Collections.singletonList(metaRegionInfo), true); + } else { + LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " already exists" + + " on server : " + server); + } + isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO); + } + + if (!hRegionLocationRegionIsolation.isEmpty()) { + for (HRegionLocation hRegionLocation : hRegionLocationRegionIsolation) { + isolateRegionInfo = hRegionLocation.getRegion(); + isolateRegionInfoList.add(isolateRegionInfo); + if (hRegionLocation.getServerName() == server) { + LOG.info("Region " + hRegionLocation.getRegion().getEncodedName() + " already exists" + + " on server : " + server.getHostname()); + } else { + Future isolateRegionTask = + isolateRegionPool.submit(new MoveWithAck(conn, isolateRegionInfo, + hRegionLocation.getServerName(), server, recentlyIsolatedRegion)); + isolateRegionTaskList.add(isolateRegionTask); + } + } + } + + if (!isolateRegionTaskList.isEmpty()) { + isolateRegionPool.shutdown(); + // Now that we have fetched all the region's regionInfo, we can move them. + waitMoveTasksToFinish(isolateRegionPool, isolateRegionTaskList, + admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX)); + + Set currentRegionsOnTheServer = new HashSet<>(admin.getRegions(server)); + if (!currentRegionsOnTheServer.containsAll(isolateRegionInfoList)) { + // If all the regions are not online on the target server, + // we don't put RS in decommission mode and exit from here. + LOG.error("One of the Region move failed OR stuck in transition...Quitting now"); + break; + } + } else { + LOG.info("All regions already exists on server : " + server.getHostname()); + } + // Once region has been moved to target RS, put the target RS into decommission mode, + // so master doesn't assign new region to the target RS while we unload the target RS. + // Also pass 'offload' flag as false since we don't want master to offload the target RS. + List listOfServer = new ArrayList<>(); + listOfServer.add(server); + LOG.info("Putting server : " + server.getHostname() + " in decommission/draining mode"); + admin.decommissionRegionServers(listOfServer, false); + } List regionsToMove = admin.getRegions(server); + // Remove all the regions from the online Region list, that we just isolated. + // This will also include hbase:meta if it was isolated. + regionsToMove.removeAll(isolateRegionInfoList); regionsToMove.removeAll(movedRegions); if (regionsToMove.isEmpty()) { LOG.info("No Regions to move....Quitting now"); @@ -511,21 +652,25 @@ private void unloadRegions(ServerName server, List regionServers, Optional metaRegion = getMetaRegionInfoIfToBeMoved(regionsToMove); if (metaRegion.isPresent()) { RegionInfo meta = metaRegion.get(); + // hbase:meta should move explicitly in Ack mode. 
submitRegionMovesWhileUnloading(server, regionServers, movedRegions, - Collections.singletonList(meta)); + Collections.singletonList(meta), true); regionsToMove.remove(meta); } - submitRegionMovesWhileUnloading(server, regionServers, movedRegions, regionsToMove); + submitRegionMovesWhileUnloading(server, regionServers, movedRegions, regionsToMove, false); } } private void submitRegionMovesWhileUnloading(ServerName server, List regionServers, - List movedRegions, List regionsToMove) throws Exception { + List movedRegions, List regionsToMove, boolean forceMoveRegionByAck) + throws Exception { final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); List> taskList = new ArrayList<>(); int serverIndex = 0; for (RegionInfo regionToMove : regionsToMove) { - if (ack) { + // To move/isolate hbase:meta on a server, it should happen explicitly by Ack mode, hence the + // forceMoveRegionByAck = true. + if (ack || forceMoveRegionByAck) { Future task = moveRegionsPool.submit(new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), movedRegions)); taskList.add(task); @@ -771,9 +916,17 @@ private ServerName stripServer(List regionServers, String hostname, @Override protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); - this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); + this.addRequiredOptWithArg("o", "operation", + "Expected: load/unload/unload_from_rack/isolate_regions"); this.addOptWithArg("m", "maxthreads", "Define the maximum number of threads to use to unload and reload the regions"); + this.addOptWithArg("i", "isolateRegionIds", + "Comma separated list of Region IDs hash to isolate on a RegionServer and put region server" + + " in draining mode. This option should only be used with '-o isolate_regions'." + + " By putting region server in decommission/draining mode, master can't assign any" + + " new region on this server. If one or more regions are not found OR failed to isolate" + + " successfully, utility will exit without putting RS in draining/decommission mode." + + " Ex. 
--isolateRegionIds id1,id2,id3 OR -i id1,id2,id3"); this.addOptWithArg("x", "excludefile", "File with per line to exclude as unload targets; default excludes only " + "target host; useful for rack decommisioning."); @@ -795,9 +948,14 @@ protected void addOptions() { protected void processOptions(CommandLine cmd) { String hostname = cmd.getOptionValue("r"); rmbuilder = new RegionMoverBuilder(hostname); + this.loadUnload = cmd.getOptionValue("o").toLowerCase(Locale.ROOT); if (cmd.hasOption('m')) { rmbuilder.maxthreads(Integer.parseInt(cmd.getOptionValue('m'))); } + if (this.loadUnload.equals("isolate_regions") && cmd.hasOption("isolateRegionIds")) { + rmbuilder + .isolateRegionIdArray(Arrays.asList(cmd.getOptionValue("isolateRegionIds").split(","))); + } if (cmd.hasOption('n')) { rmbuilder.ack(false); } @@ -813,7 +971,6 @@ protected void processOptions(CommandLine cmd) { if (cmd.hasOption('t')) { rmbuilder.timeout(Integer.parseInt(cmd.getOptionValue('t'))); } - this.loadUnload = cmd.getOptionValue("o").toLowerCase(Locale.ROOT); } @Override @@ -826,6 +983,15 @@ protected int doWork() throws Exception { success = rm.unload(); } else if (loadUnload.equalsIgnoreCase("unload_from_rack")) { success = rm.unloadFromRack(); + } else if (loadUnload.equalsIgnoreCase("isolate_regions")) { + if (rm.isolateRegionIdArray != null && !rm.isolateRegionIdArray.isEmpty()) { + success = rm.isolateRegions(); + } else { + LOG.error("Missing -i/--isolate_regions option with '-o isolate_regions' option"); + LOG.error("Use -h or --help for usage instructions"); + printUsage(); + success = false; + } } else { printUsage(); success = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java index 2f145ad3d023..ec8c44592e94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java @@ -23,18 +23,25 @@ import java.util.stream.Collectors; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -210,4 +217,174 @@ public void testFailedRegionMove() throws Exception { } } + public void loadDummyDataInTable(TableName tableName) throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + Table table = TEST_UTIL.getConnection().getTable(tableName); + List puts = new 
ArrayList<>(); + for (int i = 0; i < 1000; i++) { + puts.add(new Put(Bytes.toBytes("rowkey_" + i)).addColumn(Bytes.toBytes("fam1"), + Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); + } + table.put(puts); + admin.flush(tableName); + } + + @Test + public void testIsolateSingleRegionOnTheSameServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + ServerName sourceServerName = findSourceServerName(tableName); + // Isolating 1 region on the same region server. + regionIsolationOperation(sourceServerName, sourceServerName, 1, false); + } + + @Test + public void testIsolateSingleRegionOnTheDifferentServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + ServerName sourceServerName = findSourceServerName(tableName); + ServerName destinationServerName = findDestinationServerName(sourceServerName); + // Isolating 1 region on the different region server. + regionIsolationOperation(sourceServerName, destinationServerName, 1, false); + } + + @Test + public void testIsolateMultipleRegionsOnTheSameServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + ServerName sourceServerName = findSourceServerName(tableName); + // Isolating 2 regions on the same region server. + regionIsolationOperation(sourceServerName, sourceServerName, 2, false); + } + + @Test + public void testIsolateMultipleRegionsOnTheDifferentServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + // Isolating 2 regions on the different region server. + ServerName sourceServerName = findSourceServerName(tableName); + ServerName destinationServerName = findDestinationServerName(sourceServerName); + regionIsolationOperation(sourceServerName, destinationServerName, 2, false); + } + + @Test + public void testIsolateMetaOnTheSameSever() throws Exception { + ServerName metaServerSource = findMetaRSLocation(); + regionIsolationOperation(metaServerSource, metaServerSource, 1, true); + } + + @Test + public void testIsolateMetaOnTheDifferentServer() throws Exception { + ServerName metaServerSource = findMetaRSLocation(); + ServerName metaServerDestination = findDestinationServerName(metaServerSource); + regionIsolationOperation(metaServerSource, metaServerDestination, 1, true); + } + + @Test + public void testIsolateMetaAndRandomRegionOnTheMetaServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + ServerName metaServerSource = findMetaRSLocation(); + ServerName randomSeverRegion = findSourceServerName(tableName); + regionIsolationOperation(randomSeverRegion, metaServerSource, 2, true); + } + + @Test + public void testIsolateMetaAndRandomRegionOnTheRandomServer() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + loadDummyDataInTable(tableName); + ServerName randomSeverRegion = findSourceServerName(tableName); + regionIsolationOperation(randomSeverRegion, randomSeverRegion, 2, true); + } + + public ServerName findMetaRSLocation() throws Exception { + ZKWatcher zkWatcher = new ZKWatcher(TEST_UTIL.getConfiguration(), null, null); + List result = new ArrayList<>(); + for (String znode : zkWatcher.getMetaReplicaNodes()) { + String path = ZNodePaths.joinZNode(zkWatcher.getZNodePaths().baseZNode, znode); + int replicaId = 
zkWatcher.getZNodePaths().getMetaReplicaIdFromPath(path); + RegionState state = MetaTableLocator.getMetaRegionState(zkWatcher, replicaId); + result.add(new HRegionLocation(state.getRegion(), state.getServerName())); + } + return result.get(0).getServerName(); + } + + public ServerName findSourceServerName(TableName tableName) throws Exception { + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + int numOfRS = cluster.getNumLiveRegionServers(); + ServerName sourceServer = null; + for (int i = 0; i < numOfRS; i++) { + HRegionServer regionServer = cluster.getRegionServer(i); + List hRegions = regionServer.getRegions().stream() + .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) + .collect(Collectors.toList()); + if (hRegions.size() >= 2) { + sourceServer = regionServer.getServerName(); + break; + } + } + if (sourceServer == null) { + throw new Exception( + "This shouln't happen, No RS found with more than 2 regions of table : " + tableName); + } + return sourceServer; + } + + public ServerName findDestinationServerName(ServerName sourceServerName) throws Exception { + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + ServerName destinationServerName = null; + int numOfRS = cluster.getNumLiveRegionServers(); + for (int i = 0; i < numOfRS; i++) { + destinationServerName = cluster.getRegionServer(i).getServerName(); + if (!destinationServerName.equals(sourceServerName)) { + break; + } + } + if (destinationServerName == null) { + throw new Exception("This shouldn't happen, No RS found which is different than source RS"); + } + return destinationServerName; + } + + public void regionIsolationOperation(ServerName sourceServerName, + ServerName destinationServerName, int numRegionsToIsolate, boolean isolateMetaAlso) + throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + Admin admin = TEST_UTIL.getAdmin(); + HRegionServer sourceRS = cluster.getRegionServer(sourceServerName); + List hRegions = sourceRS.getRegions().stream() + .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) + .collect(Collectors.toList()); + List listOfRegionIDsToIsolate = new ArrayList<>(); + for (int i = 0; i < numRegionsToIsolate; i++) { + listOfRegionIDsToIsolate.add(hRegions.get(i).getRegionInfo().getEncodedName()); + } + + if (isolateMetaAlso) { + listOfRegionIDsToIsolate.remove(0); + listOfRegionIDsToIsolate.add(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + } + + HRegionServer destinationRS = cluster.getRegionServer(destinationServerName); + String destinationRSName = destinationRS.getServerName().getAddress().toString(); + RegionMover.RegionMoverBuilder rmBuilder = + new RegionMover.RegionMoverBuilder(destinationRSName, TEST_UTIL.getConfiguration()).ack(true) + .maxthreads(8).isolateRegionIdArray(listOfRegionIDsToIsolate); + try (RegionMover rm = rmBuilder.build()) { + LOG.debug("Unloading {} except regions : {}", destinationRS.getServerName(), + listOfRegionIDsToIsolate); + rm.isolateRegions(); + Assert.assertEquals(numRegionsToIsolate, destinationRS.getNumberOfOnlineRegions()); + List onlineRegions = destinationRS.getRegions(); + for (int i = 0; i < numRegionsToIsolate; i++) { + Assert.assertTrue( + listOfRegionIDsToIsolate.contains(onlineRegions.get(i).getRegionInfo().getEncodedName())); + } + LOG.debug("Successfully Isolated " + listOfRegionIDsToIsolate.size() + " regions : " + + listOfRegionIDsToIsolate + " on " + 
destinationRS.getServerName()); + } finally { + admin.recommissionRegionServer(destinationRS.getServerName(), null); + } + } } From 208e9b1a82885106e9d0e0df6472e475c73599ce Mon Sep 17 00:00:00 2001 From: "Tak Lon (Stephen) Wu" Date: Sun, 29 Oct 2023 16:43:07 -0700 Subject: [PATCH 123/514] HBASE-28164 Add hbase-connectos 1.0.1 to download page (#5483) Signed-off-by: Duo Zhang --- src/site/xdoc/downloads.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 72643bc8bf86..7e163612f96b 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -131,22 +131,22 @@ under the License. - 1.0.0 + 1.0.1 - 2019/05/03 + 2023/10/27 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
+ src (sha512 asc)
+ bin (sha512 asc)
From a97373965e3aca45e0a43d38d6de2827c8f4c4ae Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 1 Nov 2023 10:46:32 +0800 Subject: [PATCH 124/514] HBASE-28153 Upgrade zookeeper to a newer version (#5475) Signed-off-by: Nick Dimiduk Signed-off-by: Andrew Purtell Signed-off-by: Bryan Beaudreault --- hbase-it/pom.xml | 7 --- hbase-server/pom.xml | 7 --- hbase-zookeeper/pom.xml | 9 +++ pom.xml | 133 +++++++++++----------------------------- 4 files changed, 44 insertions(+), 112 deletions(-) diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index bc67f21c7dc3..c3f884a16b0d 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -132,13 +132,6 @@ io.opentelemetry opentelemetry-api - - - io.netty - netty - ${netty.hadoop.version} - test - org.slf4j jcl-over-slf4j diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 2455c199cb2b..c04947529bf3 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -699,13 +699,6 @@ - org.apache.hadoop hadoop-minikdc diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml index f1711fdf3387..8fceb26d5b08 100644 --- a/hbase-zookeeper/pom.xml +++ b/hbase-zookeeper/pom.xml @@ -139,6 +139,15 @@ log4j-slf4j-impl test + + + org.xerial.snappy + snappy-java + + + commons-cli + commons-cli + diff --git a/pom.xml b/pom.xml index 7d0bb49dcdab..aedb08077d56 100644 --- a/pom.xml +++ b/pom.xml @@ -808,9 +808,18 @@ --> ${hadoop-three.version} src/main/assembly/hadoop-three-compat.xml - - 3.10.5.Final + + 3.10.6.Final + 4.1.100.Final 0.13.0 com.google.code.findbugs jsr305 @@ -3941,14 +3901,6 @@ org.slf4j slf4j-reload4j - - io.netty - netty - - - io.netty - netty-all - @@ -3990,13 +3942,6 @@ org.slf4j slf4j-reload4j - org.codehaus.jackson * @@ -4062,14 +4007,6 @@ stax stax-api - - io.netty - netty - - - io.netty - netty-all - com.google.code.findbugs jsr305 From c37fba61e2db17567f0457e3525aec6fe1aa11af Mon Sep 17 00:00:00 2001 From: Kota-SH Date: Wed, 1 Nov 2023 04:38:50 -0500 Subject: [PATCH 125/514] HBASE-27794: Tooling for parsing/reading the prefetch files list file (#5468) Signed-off-by: Wellingt --- .../org/apache/hadoop/hbase/client/Admin.java | 5 +++++ .../hbase/client/AdminOverAsyncAdmin.java | 5 +++++ .../apache/hadoop/hbase/client/AsyncAdmin.java | 5 +++++ .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 +++++ .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 13 +++++++++++++ .../hbase/shaded/protobuf/ProtobufUtil.java | 17 +++++++++++++++++ .../src/main/protobuf/server/region/Admin.proto | 10 ++++++++++ .../hadoop/hbase/io/hfile/BlockCache.java | 9 +++++++++ .../hbase/io/hfile/CombinedBlockCache.java | 10 ++++++++++ .../hadoop/hbase/io/hfile/HFilePreadReader.java | 5 +++-- .../hbase/io/hfile/bucket/BucketCache.java | 5 +++-- .../hadoop/hbase/master/MasterRpcServices.java | 8 ++++++++ .../hbase/regionserver/RSRpcServices.java | 13 +++++++++++++ .../hbase/io/hfile/TestPrefetchRSClose.java | 16 +++++++++++++++- .../io/hfile/TestPrefetchWithBucketCache.java | 2 +- .../hadoop/hbase/master/MockRegionServer.java | 8 ++++++++ .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 +++++ .../hbase/thrift2/client/ThriftAdmin.java | 5 +++++ 18 files changed, 140 insertions(+), 6 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 417e0013523a..1f22c0fe0a86 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2629,4 +2629,9 @@ List 
getLogEntries(Set serverNames, String logType, Server * Flush master local region */ void flushMasterStore() throws IOException; + + /** + * Get the list of cached files + */ + List getCachedFilesList(ServerName serverName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index bb620aa3cdaa..9c8f03a1057c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1125,4 +1125,9 @@ public List getLogEntries(Set serverNames, String logType, public void flushMasterStore() throws IOException { get(admin.flushMasterStore()); } + + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + return get(admin.getCachedFilesList(serverName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 1097abbbf5e2..313d2f01c881 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1843,4 +1843,9 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Flush master local region */ CompletableFuture flushMasterStore(); + + /** + * Get the list of cached files + */ + CompletableFuture> getCachedFilesList(ServerName serverName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 0c7fd0f7b354..247acf485889 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -995,4 +995,9 @@ public CompletableFuture> getLogEntries(Set serverNam public CompletableFuture flushMasterStore() { return wrap(rawAdmin.flushMasterStore()); } + + @Override + public CompletableFuture> getCachedFilesList(ServerName serverName) { + return wrap(rawAdmin.getCachedFilesList(serverName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 953dd2024767..92a95d491f0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -132,6 +132,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -4519,4 +4521,15 @@ Void> call(controller, stub, request.build(), (s, c, req, done) -> s.flushMasterStore(c, req, 
done), resp -> null)) .call(); } + + @Override + public CompletableFuture> getCachedFilesList(ServerName serverName) { + GetCachedFilesListRequest.Builder request = GetCachedFilesListRequest.newBuilder(); + return this.> newAdminCaller() + .action((controller, stub) -> this.> adminCall(controller, stub, request.build(), + (s, c, req, done) -> s.getCachedFilesList(c, req, done), + resp -> resp.getCachedFilesList())) + .serverName(serverName).call(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index c14a0d042823..d2e14df1c8d6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -153,6 +153,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -1780,6 +1782,21 @@ public static List getOnlineRegions( return getRegionInfos(response); } + /** + * Get the list of cached files + */ + public static List getCachedFilesList(final RpcController controller, + final AdminService.BlockingInterface admin) throws IOException { + GetCachedFilesListRequest request = GetCachedFilesListRequest.newBuilder().build(); + GetCachedFilesListResponse response = null; + try { + response = admin.getCachedFilesList(controller, request); + } catch (ServiceException se) { + throw getRemoteException(se); + } + return new ArrayList<>(response.getCachedFilesList()); + } + /** * Get the list of region info from a GetOnlineRegionResponse * @param proto the GetOnlineRegionResponse diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index cd88a0ca7cdb..308b1a8b6d62 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -283,6 +283,13 @@ message ExecuteProceduresRequest { message ExecuteProceduresResponse { } +message GetCachedFilesListRequest { +} + +message GetCachedFilesListResponse { + repeated string cached_files = 1; +} + /** * Slow/Large log (LogRequest) use-case specific RPC request. 
This request payload will be * converted in bytes and sent to generic RPC API: GetLogEntries @@ -405,4 +412,7 @@ service AdminService { rpc GetLogEntries(LogRequest) returns(LogEntry); + rpc GetCachedFilesList(GetCachedFilesListRequest) + returns(GetCachedFilesListResponse); + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 4e795ec75e75..e480c9b5789b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; +import java.util.Map; +import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; /** @@ -161,4 +163,11 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe default boolean isMetaBlock(BlockType blockType) { return blockType != null && blockType.getCategory() != BlockType.BlockCategory.DATA; } + + /** + * Returns the list of fully cached files + */ + default Optional> getFullyCachedFiles() { + return Optional.empty(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index d616d6f40d9f..57c103562d70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; +import java.util.Map; +import java.util.Optional; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.yetus.audience.InterfaceAudience; @@ -382,6 +384,14 @@ public BlockCache[] getBlockCaches() { return new BlockCache[] { this.l1Cache, this.l2Cache }; } + /** + * Returns the list of fully cached files + */ + @Override + public Optional> getFullyCachedFiles() { + return this.l2Cache.getFullyCachedFiles(); + } + @Override public void setMaxSize(long size) { this.l1Cache.setMaxSize(size); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 1ac9a4ffb842..f1579ea53b8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -42,8 +42,9 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c final MutableBoolean fileAlreadyCached = new MutableBoolean(false); Optional bucketCacheOptional = BucketCache.getBucketCacheFromCacheConfig(cacheConf); - bucketCacheOptional.ifPresent(bc -> fileAlreadyCached - .setValue(bc.getFullyCachedFiles().get(path.getName()) == null ? false : true)); + bucketCacheOptional.flatMap(BucketCache::getFullyCachedFiles).ifPresent(fcf -> { + fileAlreadyCached.setValue(fcf.get(path.getName()) == null ? 
false : true); + }); // Prefetch file blocks upon open if requested if ( cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index c082273b53b7..e3d740383085 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1965,8 +1965,9 @@ public AtomicBoolean getBackingMapValidated() { return backingMapValidated; } - public Map getFullyCachedFiles() { - return fullyCachedFiles; + @Override + public Optional> getFullyCachedFiles() { + return Optional.of(fullyCachedFiles); } public static Optional getBucketCacheFromCacheConfig(CacheConfig cacheConf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 736fbae0dea9..9ff69d436469 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -164,6 +164,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3575,6 +3577,12 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); } + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + @Override public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, GetLiveRegionServersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 57efe505c126..4f04457e91b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -169,6 +169,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3933,4 +3935,15 @@ public void onConfigurationChange(Configuration conf) { super.onConfigurationChange(conf); setReloadableGuardrails(conf); } + + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + GetCachedFilesListResponse.Builder responseBuilder = GetCachedFilesListResponse.newBuilder(); + List fullyCachedFiles = new ArrayList<>(); + server.getBlockCache().flatMap(BlockCache::getFullyCachedFiles).ifPresent(fcf -> { + fullyCachedFiles.addAll(fcf.keySet()); + }); + return responseBuilder.addAllCachedFiles(fullyCachedFiles).build(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java index 64db9158333d..879d8566c82e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.File; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -30,11 +31,14 @@ import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -112,7 +116,17 @@ public void testPrefetchPersistence() throws Exception { // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files // should exist. - assertTrue(new File(testDir + "/bucket.persistence").exists()); + + HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableName).size() == 1 + ? 
cluster.getRegionServer(1) + : cluster.getRegionServer(0); + + Admin admin = TEST_UTIL.getAdmin(); + List cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName()); + assertEquals(1, cachedFilesList.size()); + for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) { + assertTrue(cachedFilesList.contains(h.getPath().getName())); + } // Stop the RS cluster.stopRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index e5c6b42fcc40..93f09231f740 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -123,7 +123,7 @@ public void testPrefetchDoesntOverwork() throws Exception { BlockCacheKey key = snapshot.keySet().stream().findFirst().get(); LOG.debug("removing block {}", key); bc.getBackingMap().remove(key); - bc.getFullyCachedFiles().remove(storeFile.getName()); + bc.getFullyCachedFiles().ifPresent(fcf -> fcf.remove(storeFile.getName())); assertTrue(snapshot.size() > bc.getBackingMap().size()); LOG.debug("Third read should prefetch again, as we removed one block for the file."); readStoreFile(storeFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 9974c824f889..a7164a6fab64 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -97,6 +97,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -452,6 +454,12 @@ public GetOnlineRegionResponse getOnlineRegion(RpcController controller, return null; } + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + return null; + } + @Override public List getRegions() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 02087fb0a661..f98a1c78ad5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -969,6 +969,11 @@ public void flushMasterStore() throws IOException { admin.flushMasterStore(); } + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + return admin.getCachedFilesList(serverName); + } + @Override public 
boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 70ce37faf47e..c88572b9f3c7 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1345,6 +1345,11 @@ public void flushMasterStore() throws IOException { throw new NotImplementedException("flushMasterStore not supported in ThriftAdmin"); } + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From b961eb2ddd025fffbc47451854066c963687ad0c Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 1 Nov 2023 09:50:26 +0000 Subject: [PATCH 126/514] Revert "HBASE-27794: Tooling for parsing/reading the prefetch files list file (#5468)" This reverts commit c37fba61e2db17567f0457e3525aec6fe1aa11af. --- .../org/apache/hadoop/hbase/client/Admin.java | 5 ----- .../hbase/client/AdminOverAsyncAdmin.java | 5 ----- .../apache/hadoop/hbase/client/AsyncAdmin.java | 5 ----- .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 ----- .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 13 ------------- .../hbase/shaded/protobuf/ProtobufUtil.java | 17 ----------------- .../src/main/protobuf/server/region/Admin.proto | 10 ---------- .../hadoop/hbase/io/hfile/BlockCache.java | 9 --------- .../hbase/io/hfile/CombinedBlockCache.java | 10 ---------- .../hadoop/hbase/io/hfile/HFilePreadReader.java | 5 ++--- .../hbase/io/hfile/bucket/BucketCache.java | 5 ++--- .../hadoop/hbase/master/MasterRpcServices.java | 8 -------- .../hbase/regionserver/RSRpcServices.java | 13 ------------- .../hbase/io/hfile/TestPrefetchRSClose.java | 16 +--------------- .../io/hfile/TestPrefetchWithBucketCache.java | 2 +- .../hadoop/hbase/master/MockRegionServer.java | 8 -------- .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 ----- .../hbase/thrift2/client/ThriftAdmin.java | 5 ----- 18 files changed, 6 insertions(+), 140 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 1f22c0fe0a86..417e0013523a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2629,9 +2629,4 @@ List getLogEntries(Set serverNames, String logType, Server * Flush master local region */ void flushMasterStore() throws IOException; - - /** - * Get the list of cached files - */ - List getCachedFilesList(ServerName serverName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 9c8f03a1057c..bb620aa3cdaa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1125,9 +1125,4 @@ public List getLogEntries(Set serverNames, String logType, public void flushMasterStore() throws IOException { 
get(admin.flushMasterStore()); } - - @Override - public List getCachedFilesList(ServerName serverName) throws IOException { - return get(admin.getCachedFilesList(serverName)); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 313d2f01c881..1097abbbf5e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1843,9 +1843,4 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Flush master local region */ CompletableFuture flushMasterStore(); - - /** - * Get the list of cached files - */ - CompletableFuture> getCachedFilesList(ServerName serverName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 247acf485889..0c7fd0f7b354 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -995,9 +995,4 @@ public CompletableFuture> getLogEntries(Set serverNam public CompletableFuture flushMasterStore() { return wrap(rawAdmin.flushMasterStore()); } - - @Override - public CompletableFuture> getCachedFilesList(ServerName serverName) { - return wrap(rawAdmin.getCachedFilesList(serverName)); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 92a95d491f0d..953dd2024767 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -132,8 +132,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -4521,15 +4519,4 @@ Void> call(controller, stub, request.build(), (s, c, req, done) -> s.flushMasterStore(c, req, done), resp -> null)) .call(); } - - @Override - public CompletableFuture> getCachedFilesList(ServerName serverName) { - GetCachedFilesListRequest.Builder request = GetCachedFilesListRequest.newBuilder(); - return this.> newAdminCaller() - .action((controller, stub) -> this.> adminCall(controller, stub, request.build(), - (s, c, req, done) -> s.getCachedFilesList(c, req, done), - resp -> resp.getCachedFilesList())) - .serverName(serverName).call(); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d2e14df1c8d6..c14a0d042823 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -153,8 +153,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -1782,21 +1780,6 @@ public static List getOnlineRegions( return getRegionInfos(response); } - /** - * Get the list of cached files - */ - public static List getCachedFilesList(final RpcController controller, - final AdminService.BlockingInterface admin) throws IOException { - GetCachedFilesListRequest request = GetCachedFilesListRequest.newBuilder().build(); - GetCachedFilesListResponse response = null; - try { - response = admin.getCachedFilesList(controller, request); - } catch (ServiceException se) { - throw getRemoteException(se); - } - return new ArrayList<>(response.getCachedFilesList()); - } - /** * Get the list of region info from a GetOnlineRegionResponse * @param proto the GetOnlineRegionResponse diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index 308b1a8b6d62..cd88a0ca7cdb 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -283,13 +283,6 @@ message ExecuteProceduresRequest { message ExecuteProceduresResponse { } -message GetCachedFilesListRequest { -} - -message GetCachedFilesListResponse { - repeated string cached_files = 1; -} - /** * Slow/Large log (LogRequest) use-case specific RPC request. 
This request payload will be * converted in bytes and sent to generic RPC API: GetLogEntries @@ -412,7 +405,4 @@ service AdminService { rpc GetLogEntries(LogRequest) returns(LogEntry); - rpc GetCachedFilesList(GetCachedFilesListRequest) - returns(GetCachedFilesListResponse); - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index e480c9b5789b..4e795ec75e75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; -import java.util.Map; -import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; /** @@ -163,11 +161,4 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe default boolean isMetaBlock(BlockType blockType) { return blockType != null && blockType.getCategory() != BlockType.BlockCategory.DATA; } - - /** - * Returns the list of fully cached files - */ - default Optional> getFullyCachedFiles() { - return Optional.empty(); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 57c103562d70..d616d6f40d9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; -import java.util.Map; -import java.util.Optional; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.yetus.audience.InterfaceAudience; @@ -384,14 +382,6 @@ public BlockCache[] getBlockCaches() { return new BlockCache[] { this.l1Cache, this.l2Cache }; } - /** - * Returns the list of fully cached files - */ - @Override - public Optional> getFullyCachedFiles() { - return this.l2Cache.getFullyCachedFiles(); - } - @Override public void setMaxSize(long size) { this.l1Cache.setMaxSize(size); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index f1579ea53b8e..1ac9a4ffb842 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -42,9 +42,8 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c final MutableBoolean fileAlreadyCached = new MutableBoolean(false); Optional bucketCacheOptional = BucketCache.getBucketCacheFromCacheConfig(cacheConf); - bucketCacheOptional.flatMap(BucketCache::getFullyCachedFiles).ifPresent(fcf -> { - fileAlreadyCached.setValue(fcf.get(path.getName()) == null ? false : true); - }); + bucketCacheOptional.ifPresent(bc -> fileAlreadyCached + .setValue(bc.getFullyCachedFiles().get(path.getName()) == null ? 
false : true)); // Prefetch file blocks upon open if requested if ( cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index e3d740383085..c082273b53b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1965,9 +1965,8 @@ public AtomicBoolean getBackingMapValidated() { return backingMapValidated; } - @Override - public Optional> getFullyCachedFiles() { - return Optional.of(fullyCachedFiles); + public Map getFullyCachedFiles() { + return fullyCachedFiles; } public static Optional getBucketCacheFromCacheConfig(CacheConfig cacheConf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 9ff69d436469..736fbae0dea9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -164,8 +164,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3577,12 +3575,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); } - @Override - public GetCachedFilesListResponse getCachedFilesList(RpcController controller, - GetCachedFilesListRequest request) throws ServiceException { - throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); - } - @Override public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, GetLiveRegionServersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 4f04457e91b6..57efe505c126 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -169,8 +169,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3935,15 +3933,4 @@ public void onConfigurationChange(Configuration conf) { super.onConfigurationChange(conf); setReloadableGuardrails(conf); } - - @Override - public GetCachedFilesListResponse getCachedFilesList(RpcController controller, - GetCachedFilesListRequest request) throws ServiceException { - GetCachedFilesListResponse.Builder responseBuilder = GetCachedFilesListResponse.newBuilder(); - List fullyCachedFiles = new ArrayList<>(); - server.getBlockCache().flatMap(BlockCache::getFullyCachedFiles).ifPresent(fcf -> { - fullyCachedFiles.addAll(fcf.keySet()); - }); - return responseBuilder.addAllCachedFiles(fullyCachedFiles).build(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java index 879d8566c82e..64db9158333d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue; import java.io.File; -import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -31,14 +30,11 @@ import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -116,17 +112,7 @@ public void testPrefetchPersistence() throws Exception { // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files // should exist. - - HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableName).size() == 1 - ? 
cluster.getRegionServer(1) - : cluster.getRegionServer(0); - - Admin admin = TEST_UTIL.getAdmin(); - List cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName()); - assertEquals(1, cachedFilesList.size()); - for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) { - assertTrue(cachedFilesList.contains(h.getPath().getName())); - } + assertTrue(new File(testDir + "/bucket.persistence").exists()); // Stop the RS cluster.stopRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index 93f09231f740..e5c6b42fcc40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -123,7 +123,7 @@ public void testPrefetchDoesntOverwork() throws Exception { BlockCacheKey key = snapshot.keySet().stream().findFirst().get(); LOG.debug("removing block {}", key); bc.getBackingMap().remove(key); - bc.getFullyCachedFiles().ifPresent(fcf -> fcf.remove(storeFile.getName())); + bc.getFullyCachedFiles().remove(storeFile.getName()); assertTrue(snapshot.size() > bc.getBackingMap().size()); LOG.debug("Third read should prefetch again, as we removed one block for the file."); readStoreFile(storeFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a7164a6fab64..9974c824f889 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -97,8 +97,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -454,12 +452,6 @@ public GetOnlineRegionResponse getOnlineRegion(RpcController controller, return null; } - @Override - public GetCachedFilesListResponse getCachedFilesList(RpcController controller, - GetCachedFilesListRequest request) throws ServiceException { - return null; - } - @Override public List getRegions() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index f98a1c78ad5f..02087fb0a661 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -969,11 +969,6 @@ public void flushMasterStore() throws IOException { admin.flushMasterStore(); } - @Override - public List getCachedFilesList(ServerName serverName) throws IOException { - return 
admin.getCachedFilesList(serverName); - } - @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index c88572b9f3c7..70ce37faf47e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1345,11 +1345,6 @@ public void flushMasterStore() throws IOException { throw new NotImplementedException("flushMasterStore not supported in ThriftAdmin"); } - @Override - public List getCachedFilesList(ServerName serverName) throws IOException { - throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); - } - @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From fa4c8960e587e9f62442c35fd11a85e1cc4b4690 Mon Sep 17 00:00:00 2001 From: Kota-SH Date: Wed, 1 Nov 2023 04:38:50 -0500 Subject: [PATCH 127/514] HBASE-27794: Tooling for parsing/reading the prefetch files list file (#5468) Signed-off-by: Wellington Chevreuil --- .../org/apache/hadoop/hbase/client/Admin.java | 5 +++++ .../hbase/client/AdminOverAsyncAdmin.java | 5 +++++ .../apache/hadoop/hbase/client/AsyncAdmin.java | 5 +++++ .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 +++++ .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 13 +++++++++++++ .../hbase/shaded/protobuf/ProtobufUtil.java | 17 +++++++++++++++++ .../src/main/protobuf/server/region/Admin.proto | 10 ++++++++++ .../hadoop/hbase/io/hfile/BlockCache.java | 9 +++++++++ .../hbase/io/hfile/CombinedBlockCache.java | 10 ++++++++++ .../hadoop/hbase/io/hfile/HFilePreadReader.java | 5 +++-- .../hbase/io/hfile/bucket/BucketCache.java | 5 +++-- .../hadoop/hbase/master/MasterRpcServices.java | 8 ++++++++ .../hbase/regionserver/RSRpcServices.java | 13 +++++++++++++ .../hbase/io/hfile/TestPrefetchRSClose.java | 16 +++++++++++++++- .../io/hfile/TestPrefetchWithBucketCache.java | 2 +- .../hadoop/hbase/master/MockRegionServer.java | 8 ++++++++ .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 +++++ .../hbase/thrift2/client/ThriftAdmin.java | 5 +++++ 18 files changed, 140 insertions(+), 6 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 417e0013523a..1f22c0fe0a86 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2629,4 +2629,9 @@ List getLogEntries(Set serverNames, String logType, Server * Flush master local region */ void flushMasterStore() throws IOException; + + /** + * Get the list of cached files + */ + List getCachedFilesList(ServerName serverName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index bb620aa3cdaa..9c8f03a1057c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1125,4 +1125,9 @@ public List getLogEntries(Set serverNames, String logType, public void flushMasterStore() throws IOException { 
get(admin.flushMasterStore()); } + + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + return get(admin.getCachedFilesList(serverName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 1097abbbf5e2..313d2f01c881 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1843,4 +1843,9 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Flush master local region */ CompletableFuture flushMasterStore(); + + /** + * Get the list of cached files + */ + CompletableFuture> getCachedFilesList(ServerName serverName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 0c7fd0f7b354..247acf485889 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -995,4 +995,9 @@ public CompletableFuture> getLogEntries(Set serverNam public CompletableFuture flushMasterStore() { return wrap(rawAdmin.flushMasterStore()); } + + @Override + public CompletableFuture> getCachedFilesList(ServerName serverName) { + return wrap(rawAdmin.getCachedFilesList(serverName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 953dd2024767..92a95d491f0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -132,6 +132,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -4519,4 +4521,15 @@ Void> call(controller, stub, request.build(), (s, c, req, done) -> s.flushMasterStore(c, req, done), resp -> null)) .call(); } + + @Override + public CompletableFuture> getCachedFilesList(ServerName serverName) { + GetCachedFilesListRequest.Builder request = GetCachedFilesListRequest.newBuilder(); + return this.> newAdminCaller() + .action((controller, stub) -> this.> adminCall(controller, stub, request.build(), + (s, c, req, done) -> s.getCachedFilesList(c, req, done), + resp -> resp.getCachedFilesList())) + .serverName(serverName).call(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index c14a0d042823..d2e14df1c8d6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -153,6 +153,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -1780,6 +1782,21 @@ public static List getOnlineRegions( return getRegionInfos(response); } + /** + * Get the list of cached files + */ + public static List getCachedFilesList(final RpcController controller, + final AdminService.BlockingInterface admin) throws IOException { + GetCachedFilesListRequest request = GetCachedFilesListRequest.newBuilder().build(); + GetCachedFilesListResponse response = null; + try { + response = admin.getCachedFilesList(controller, request); + } catch (ServiceException se) { + throw getRemoteException(se); + } + return new ArrayList<>(response.getCachedFilesList()); + } + /** * Get the list of region info from a GetOnlineRegionResponse * @param proto the GetOnlineRegionResponse diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index cd88a0ca7cdb..308b1a8b6d62 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -283,6 +283,13 @@ message ExecuteProceduresRequest { message ExecuteProceduresResponse { } +message GetCachedFilesListRequest { +} + +message GetCachedFilesListResponse { + repeated string cached_files = 1; +} + /** * Slow/Large log (LogRequest) use-case specific RPC request. 
This request payload will be * converted in bytes and sent to generic RPC API: GetLogEntries @@ -405,4 +412,7 @@ service AdminService { rpc GetLogEntries(LogRequest) returns(LogEntry); + rpc GetCachedFilesList(GetCachedFilesListRequest) + returns(GetCachedFilesListResponse); + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 4e795ec75e75..e480c9b5789b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; +import java.util.Map; +import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; /** @@ -161,4 +163,11 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe default boolean isMetaBlock(BlockType blockType) { return blockType != null && blockType.getCategory() != BlockType.BlockCategory.DATA; } + + /** + * Returns the list of fully cached files + */ + default Optional> getFullyCachedFiles() { + return Optional.empty(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index d616d6f40d9f..57c103562d70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; +import java.util.Map; +import java.util.Optional; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.yetus.audience.InterfaceAudience; @@ -382,6 +384,14 @@ public BlockCache[] getBlockCaches() { return new BlockCache[] { this.l1Cache, this.l2Cache }; } + /** + * Returns the list of fully cached files + */ + @Override + public Optional> getFullyCachedFiles() { + return this.l2Cache.getFullyCachedFiles(); + } + @Override public void setMaxSize(long size) { this.l1Cache.setMaxSize(size); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 1ac9a4ffb842..f1579ea53b8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -42,8 +42,9 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c final MutableBoolean fileAlreadyCached = new MutableBoolean(false); Optional bucketCacheOptional = BucketCache.getBucketCacheFromCacheConfig(cacheConf); - bucketCacheOptional.ifPresent(bc -> fileAlreadyCached - .setValue(bc.getFullyCachedFiles().get(path.getName()) == null ? false : true)); + bucketCacheOptional.flatMap(BucketCache::getFullyCachedFiles).ifPresent(fcf -> { + fileAlreadyCached.setValue(fcf.get(path.getName()) == null ? 
false : true); + }); // Prefetch file blocks upon open if requested if ( cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index c082273b53b7..e3d740383085 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1965,8 +1965,9 @@ public AtomicBoolean getBackingMapValidated() { return backingMapValidated; } - public Map getFullyCachedFiles() { - return fullyCachedFiles; + @Override + public Optional> getFullyCachedFiles() { + return Optional.of(fullyCachedFiles); } public static Optional getBucketCacheFromCacheConfig(CacheConfig cacheConf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 736fbae0dea9..9ff69d436469 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -164,6 +164,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3575,6 +3577,12 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); } + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + @Override public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, GetLiveRegionServersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 57efe505c126..4f04457e91b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -169,6 +169,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -3933,4 +3935,15 @@ public void onConfigurationChange(Configuration conf) { super.onConfigurationChange(conf); setReloadableGuardrails(conf); } + + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + GetCachedFilesListResponse.Builder responseBuilder = GetCachedFilesListResponse.newBuilder(); + List fullyCachedFiles = new ArrayList<>(); + server.getBlockCache().flatMap(BlockCache::getFullyCachedFiles).ifPresent(fcf -> { + fullyCachedFiles.addAll(fcf.keySet()); + }); + return responseBuilder.addAllCachedFiles(fullyCachedFiles).build(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java index 64db9158333d..879d8566c82e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.File; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -30,11 +31,14 @@ import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -112,7 +116,17 @@ public void testPrefetchPersistence() throws Exception { // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files // should exist. - assertTrue(new File(testDir + "/bucket.persistence").exists()); + + HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableName).size() == 1 + ? 
cluster.getRegionServer(1) + : cluster.getRegionServer(0); + + Admin admin = TEST_UTIL.getAdmin(); + List cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName()); + assertEquals(1, cachedFilesList.size()); + for (HStoreFile h : regionServingRS.getRegions().get(0).getStores().get(0).getStorefiles()) { + assertTrue(cachedFilesList.contains(h.getPath().getName())); + } // Stop the RS cluster.stopRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index e5c6b42fcc40..93f09231f740 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -123,7 +123,7 @@ public void testPrefetchDoesntOverwork() throws Exception { BlockCacheKey key = snapshot.keySet().stream().findFirst().get(); LOG.debug("removing block {}", key); bc.getBackingMap().remove(key); - bc.getFullyCachedFiles().remove(storeFile.getName()); + bc.getFullyCachedFiles().ifPresent(fcf -> fcf.remove(storeFile.getName())); assertTrue(snapshot.size() > bc.getBackingMap().size()); LOG.debug("Third read should prefetch again, as we removed one block for the file."); readStoreFile(storeFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 9974c824f889..a7164a6fab64 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -97,6 +97,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -452,6 +454,12 @@ public GetOnlineRegionResponse getOnlineRegion(RpcController controller, return null; } + @Override + public GetCachedFilesListResponse getCachedFilesList(RpcController controller, + GetCachedFilesListRequest request) throws ServiceException { + return null; + } + @Override public List getRegions() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 02087fb0a661..f98a1c78ad5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -969,6 +969,11 @@ public void flushMasterStore() throws IOException { admin.flushMasterStore(); } + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + return admin.getCachedFilesList(serverName); + } + @Override public 
boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 70ce37faf47e..c88572b9f3c7 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1345,6 +1345,11 @@ public void flushMasterStore() throws IOException { throw new NotImplementedException("flushMasterStore not supported in ThriftAdmin"); } + @Override + public List getCachedFilesList(ServerName serverName) throws IOException { + throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From 027a119bcf495e18b3e0fa4984f8982b9ed1390d Mon Sep 17 00:00:00 2001 From: chaijunjie0101 <64140218+chaijunjie0101@users.noreply.github.com> Date: Mon, 6 Nov 2023 10:32:19 +0800 Subject: [PATCH 128/514] HBASE-28185 Alter table to set TTL using hbase shell failed when ttl string is not match format (#5494) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index f73064f70a8e..1b19bd25287c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -184,7 +184,11 @@ private static long humanReadableIntervalToSec(final String humanReadableInterva hours = matcher.group(6); minutes = matcher.group(8); seconds = matcher.group(10); + } else { + LOG.warn("Given interval value '{}' is not a number and does not match human readable format," + + " value will be set to 0.", humanReadableInterval); } + ttl = 0; ttl += days != null ? Long.parseLong(days) * HConstants.DAY_IN_SECONDS : 0; ttl += hours != null ? Long.parseLong(hours) * HConstants.HOUR_IN_SECONDS : 0; From 7af61794a8eee25d9e48a2561dd0a6a321f48c0c Mon Sep 17 00:00:00 2001 From: Rushabh Shah Date: Tue, 7 Nov 2023 09:58:58 -0800 Subject: [PATCH 129/514] HBASE-28184 Tailing the WAL is very slow if there are multiple peers (#5503) --- .../regionserver/WALEntryStream.java | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index d1f85774a635..186d5b7c4d18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -460,17 +460,27 @@ private void dequeueCurrentLog() { * Returns whether the file is opened for writing. */ private Pair readNextEntryAndRecordReaderPosition() { - // we must call this before actually reading from the reader, as this method will acquire the - // rollWriteLock. This is very important, as we will enqueue the new WAL file in postLogRoll, - // and before this happens, we could have already finished closing the previous WAL file. 
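A minimal client-side sketch of how the cached-files listing added above might be consumed. Only Admin#getCachedFilesList(ServerName) is from this patch; the class name and the Admin#getRegionServers() loop are illustrative assumptions.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CachedFilesLister {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Ask each live region server which store files it currently reports as fully cached.
      for (ServerName sn : admin.getRegionServers()) {
        List<String> cached = admin.getCachedFilesList(sn);
        System.out.println(sn + ": " + cached.size() + " fully cached files " + cached);
      }
    }
  }
}

The same information backs the prefetch-skip check in HFilePreadReader above: a file already reported as fully cached is not prefetched again when it is reopened.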
If we - // do not acquire the rollWriteLock and return whether the current file is being written to, we - // may finish reading the previous WAL file and start to read the next one, before it is - // enqueued into the logQueue, thus lead to an empty logQueue and make the shipper think the - // queue is already ended and quit. See HBASE-28114 and related issues for more details. - // in the future, if we want to optimize the logic here, for example, do not call this method - // every time, or do not acquire rollWriteLock in the implementation of this method, we need to - // carefully review the optimized implementation - OptionalLong fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); + OptionalLong fileLength; + if (logQueue.getQueueSize(walGroupId) > 1) { + // if there are more than one files in queue, although it is possible that we are + // still trying to write the trailer of the file and it is not closed yet, we can + // make sure that we will not write any WAL entries to it any more, so it is safe + // to just let the upper layer try to read the whole file without limit + fileLength = OptionalLong.empty(); + } else { + // if there is only one file in queue, check whether it is still being written to + // we must call this before actually reading from the reader, as this method will acquire the + // rollWriteLock. This is very important, as we will enqueue the new WAL file in postLogRoll, + // and before this happens, we could have already finished closing the previous WAL file. If + // we do not acquire the rollWriteLock and return whether the current file is being written + // to, we may finish reading the previous WAL file and start to read the next one, before it + // is enqueued into the logQueue, thus lead to an empty logQueue and make the shipper think + // the queue is already ended and quit. See HBASE-28114 and related issues for more details. 
+ // in the future, if we want to optimize the logic here, for example, do not call this method + // every time, or do not acquire rollWriteLock in the implementation of this method, we need + // to carefully review the optimized implementation + fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); + } WALTailingReader.Result readResult = reader.next(fileLength.orElse(-1)); long readerPos = readResult.getEntryEndPos(); Entry readEntry = readResult.getEntry(); From 954a1f8fc392d19056cf11b896a188cda462c43b Mon Sep 17 00:00:00 2001 From: guluo Date: Wed, 8 Nov 2023 22:02:09 +0800 Subject: [PATCH 130/514] HBASE-28191 Meta browser can happen NPE when server or targetServer of region is null (#5508) Signed-off-by: Wellington Chevreuil Signed-off-by: Duo Zhang --- .../src/main/resources/hbase-webapps/master/table.jsp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 296e0e77f735..1d48a7561e1b 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -562,12 +562,13 @@ final RegionInfo regionInfo = regionReplicaInfo.getRegionInfo(); final ServerName serverName = regionReplicaInfo.getServerName(); final RegionState.State regionState = regionReplicaInfo.getRegionState(); - final int rsPort = master.getRegionServerInfoPort(serverName); final long seqNum = regionReplicaInfo.getSeqNum(); final String regionSpanFormat = "%s"; - final String targetServerName = regionReplicaInfo.getTargetServerName().toString(); + final String targetServerName = regionReplicaInfo.getTargetServerName() != null + ? regionReplicaInfo.getTargetServerName().toString() + : ""; final Map mergeRegions = regionReplicaInfo.getMergeRegionInfo(); final String mergeRegionNames = (mergeRegions == null) ? "" : mergeRegions.entrySet().stream() @@ -585,7 +586,7 @@ <%= endKeyDisplay %> <%= replicaIdDisplay %> <%= regionStateDisplay %> - "><%= buildRegionLink(serverName, rsPort, regionInfo, regionState) %> + "><%= serverName != null ? 
buildRegionLink(serverName, master.getRegionServerInfoPort(serverName), regionInfo, regionState) : "" %> <%= seqNum %> <%= targetServerName %> <%= mergeRegionNames %> From d8b5198cfb50823577afd6a66c7fc5d401c825d9 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Wed, 8 Nov 2023 20:03:01 -0500 Subject: [PATCH 131/514] HBASE-28122: Support TLSv1.3 cipher suites (#5444) Co-authored-by: Charles Connell Signed-off-by: Duo Zhang --- .../hadoop/hbase/io/crypto/tls/X509Util.java | 39 ++++++++++++------- .../hbase/io/crypto/tls/TestX509Util.java | 8 ++-- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java index 7d16a82b1f3e..41acfbbf48f4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java @@ -115,6 +115,10 @@ public final class X509Util { "hbase.client.netty.tls.handshaketimeout"; public static final int DEFAULT_HANDSHAKE_DETECTION_TIMEOUT_MILLIS = 5000; + private static String[] getTls13Ciphers() { + return new String[] { "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384" }; + } + private static String[] getGCMCiphers() { return new String[] { "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", @@ -136,14 +140,17 @@ private static String[] getCBCCiphers() { // Note that this performance assumption might not hold true for architectures other than x86_64. private static final String[] DEFAULT_CIPHERS_JAVA9 = ObjectArrays.concat(getGCMCiphers(), getCBCCiphers(), String.class); + private static final String[] DEFAULT_CIPHERS_JAVA11 = + ObjectArrays.concat(ObjectArrays.concat(getTls13Ciphers(), getGCMCiphers(), String.class), + getCBCCiphers(), String.class); private static final String[] DEFAULT_CIPHERS_OPENSSL = getOpenSslFilteredDefaultCiphers(); /** * Not all of our default ciphers are available in OpenSSL. Takes our default cipher lists and - * filters them to only those available in OpenSsl. Does GCM first, then CBC because GCM tends to - * be better and faster, and we don't need to worry about the java8 vs 9 performance issue if - * OpenSSL is handling it. + * filters them to only those available in OpenSsl. Prefers TLS 1.3, then GCM, then CBC because + * GCM tends to be better and faster, and we don't need to worry about the java8 vs 9 performance + * issue if OpenSSL is handling it. 
*/ private static String[] getOpenSslFilteredDefaultCiphers() { if (!OpenSsl.isAvailable()) { @@ -152,16 +159,9 @@ private static String[] getOpenSslFilteredDefaultCiphers() { Set openSslSuites = OpenSsl.availableJavaCipherSuites(); List defaultSuites = new ArrayList<>(); - for (String cipher : getGCMCiphers()) { - if (openSslSuites.contains(cipher)) { - defaultSuites.add(cipher); - } - } - for (String cipher : getCBCCiphers()) { - if (openSslSuites.contains(cipher)) { - defaultSuites.add(cipher); - } - } + Arrays.stream(getTls13Ciphers()).filter(openSslSuites::contains).forEach(defaultSuites::add); + Arrays.stream(getGCMCiphers()).filter(openSslSuites::contains).forEach(defaultSuites::add); + Arrays.stream(getCBCCiphers()).filter(openSslSuites::contains).forEach(defaultSuites::add); return defaultSuites.toArray(new String[0]); } @@ -219,10 +219,19 @@ static String[] getDefaultCipherSuites(boolean useOpenSsl) { static String[] getDefaultCipherSuitesForJavaVersion(String javaVersion) { Objects.requireNonNull(javaVersion); + if (javaVersion.matches("\\d+")) { // Must be Java 9 or later - LOG.debug("Using Java9+ optimized cipher suites for Java version {}", javaVersion); - return DEFAULT_CIPHERS_JAVA9; + int javaVersionInt = Integer.parseInt(javaVersion); + if (javaVersionInt >= 11) { + LOG.debug( + "Using Java11+ optimized cipher suites for Java version {}, including TLSv1.3 support", + javaVersion); + return DEFAULT_CIPHERS_JAVA11; + } else { + LOG.debug("Using Java9+ optimized cipher suites for Java version {}", javaVersion); + return DEFAULT_CIPHERS_JAVA9; + } } else if (javaVersion.startsWith("1.")) { // Must be Java 1.8 or earlier LOG.debug("Using Java8 optimized cipher suites for Java version {}", javaVersion); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestX509Util.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestX509Util.java index dd81403af6f2..dd43f8be5cb8 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestX509Util.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestX509Util.java @@ -379,21 +379,21 @@ public void testGetDefaultCipherSuitesJava8() { public void testGetDefaultCipherSuitesJava9() { String[] cipherSuites = X509Util.getDefaultCipherSuitesForJavaVersion("9"); // Java 9+ default should have the GCM suites first - assertThat(cipherSuites[0], containsString("GCM")); + assertEquals(cipherSuites[0], "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"); } @Test public void testGetDefaultCipherSuitesJava10() { String[] cipherSuites = X509Util.getDefaultCipherSuitesForJavaVersion("10"); // Java 9+ default should have the GCM suites first - assertThat(cipherSuites[0], containsString("GCM")); + assertEquals(cipherSuites[0], "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"); } @Test public void testGetDefaultCipherSuitesJava11() { String[] cipherSuites = X509Util.getDefaultCipherSuitesForJavaVersion("11"); - // Java 9+ default should have the GCM suites first - assertThat(cipherSuites[0], containsString("GCM")); + // Java 11+ default should have the TLSv1.3 suites first + assertThat(cipherSuites[0], containsString("TLS_AES_128_GCM")); } @Test From 4b5db21f3fb94779d881e8b53638edb8fe497349 Mon Sep 17 00:00:00 2001 From: GeorryHuang Date: Thu, 9 Nov 2023 11:28:56 +0800 Subject: [PATCH 132/514] HBASE-25549 Provide a switch that allows avoiding reopening all regions when modifying a table to prevent RIT storms. 
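A condensed sketch of the version dispatch above. The real suite constants and the branch taken for a non-matching version string are not fully shown in the diff, so the arrays and the final return below are placeholders:

final class CipherDefaults {
  // Java 11+ gets the TLSv1.3 suites prepended, Java 9/10 gets the GCM+CBC ordering,
  // and "1.x" version strings fall back to the Java 8 ordering.
  static String[] choose(String javaVersion, String[] java8, String[] java9, String[] java11) {
    if (javaVersion.matches("\\d+")) {      // e.g. "9", "11", "17"
      return Integer.parseInt(javaVersion) >= 11 ? java11 : java9;
    }
    if (javaVersion.startsWith("1.")) {     // e.g. "1.8.0_372"
      return java8;
    }
    return java9;                           // placeholder fallback; the patched method handles this case itself
  }
}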
(#2924) Co-authored-by: Huangzhuoyue Signed-off-by: Bryan Beaudreault Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani Signed-off-by: Bharath Vissapragada Signed-off-by: Esteban Gutierrez Reviewed-by: Sean Busbey Reviewed-by: Gourab Taparia --- .../org/apache/hadoop/hbase/client/Admin.java | 19 ++- .../hbase/client/AdminOverAsyncAdmin.java | 8 +- .../hadoop/hbase/client/AsyncAdmin.java | 15 ++- .../hadoop/hbase/client/AsyncHBaseAdmin.java | 7 +- .../hbase/client/RawAsyncHBaseAdmin.java | 7 +- .../shaded/protobuf/RequestConverter.java | 4 +- .../main/protobuf/server/master/Master.proto | 1 + .../server/master/MasterProcedure.proto | 1 + .../apache/hadoop/hbase/master/HMaster.java | 15 ++- .../hbase/master/MasterRpcServices.java | 3 +- .../hadoop/hbase/master/MasterServices.java | 13 +- .../procedure/ModifyTableProcedure.java | 87 +++++++++++-- .../hbase/master/MockNoopMasterServices.java | 6 + .../procedure/TestModifyTableProcedure.java | 114 ++++++++++++++++++ .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 7 +- hbase-shell/src/main/ruby/hbase/admin.rb | 19 ++- hbase-shell/src/main/ruby/hbase_constants.rb | 1 + .../src/main/ruby/shell/commands/alter.rb | 21 ++++ .../hbase/thrift2/client/ThriftAdmin.java | 5 + pom.xml | 2 + 20 files changed, 332 insertions(+), 23 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 1f22c0fe0a86..c0e2994c3c5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1058,7 +1058,24 @@ default void modifyTable(TableDescriptor td) throws IOException { * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the * operation to complete */ - Future modifyTableAsync(TableDescriptor td) throws IOException; + default Future modifyTableAsync(TableDescriptor td) throws IOException { + return modifyTableAsync(td, true); + } + + /** + * The same as {@link #modifyTableAsync(TableDescriptor td)}, except for the reopenRegions + * parameter, which controls whether the process of modifying the table should reopen all regions. + * @param td description of the table + * @param reopenRegions By default, 'modifyTable' reopens all regions, potentially causing a RIT + * (Region In Transition) storm in large tables. If set to 'false', regions + * will remain unaware of the modification until they are individually + * reopened. Please note that this may temporarily result in configuration + * inconsistencies among regions. + * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete + * @throws IOException if a remote or network exception occurs + */ + Future modifyTableAsync(TableDescriptor td, boolean reopenRegions) throws IOException; /** * Change the store file tracker of the given table. 
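A usage sketch for the new two-argument form (illustrative only; the helper class below is not part of the patch): the descriptor is updated on the master, but no reopen-regions step is scheduled.

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class ModifyWithoutReopen {
  static void bumpMaxFileSize(Admin admin, TableName table) throws Exception {
    TableDescriptor current = admin.getDescriptor(table);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
      .setMaxFileSize(134217728L)
      .build();
    Future<Void> f = admin.modifyTableAsync(updated, false); // false = do not reopen regions
    f.get(); // only the descriptor update is awaited; no region reopen is triggered
  }
}

Until each region is reopened (for example by a move or a later full alter), it keeps serving with the old descriptor, as the javadoc above warns.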
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 9c8f03a1057c..c13dfc33e3d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -502,7 +502,13 @@ public Future truncateRegionAsync(byte[] regionName) { @Override public Future modifyTableAsync(TableDescriptor td) throws IOException { - return admin.modifyTable(td); + return modifyTableAsync(td, true); + } + + @Override + public Future modifyTableAsync(TableDescriptor td, boolean reopenRegions) + throws IOException { + return admin.modifyTable(td, reopenRegions); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 313d2f01c881..bdb0228d9687 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -198,7 +198,20 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ * Modify an existing table, more IRB friendly version. * @param desc modified description of the table */ - CompletableFuture modifyTable(TableDescriptor desc); + default CompletableFuture modifyTable(TableDescriptor desc) { + return modifyTable(desc, true); + } + + /** + * Modify an existing table, more IRB friendly version. + * @param desc description of the table + * @param reopenRegions By default, 'modifyTable' reopens all regions, potentially causing a RIT + * (Region In Transition) storm in large tables. If set to 'false', regions + * will remain unaware of the modification until they are individually + * reopened. Please note that this may temporarily result in configuration + * inconsistencies among regions. + */ + CompletableFuture modifyTable(TableDescriptor desc, boolean reopenRegions); /** * Change the store file tracker of the given table. 
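The asynchronous counterpart, sketched against the AsyncAdmin additions above (the helper class is illustrative; the configuration key is the one used in the shell examples later in this patch):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class AsyncModifyWithoutReopen {
  // The default modifyTable(desc) keeps the old behaviour (reopenRegions = true);
  // modifyTable(desc, false) applies the change without reopening regions.
  static CompletableFuture<Void> setConf(AsyncAdmin admin, TableName table) {
    return admin.getDescriptor(table)
      .thenApply(current -> TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.hregion.scan.loadColumnFamiliesOnDemand", "true")
        .build())
      .thenCompose(updated -> admin.modifyTable(updated, false));
  }
}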
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 247acf485889..69f353600036 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -153,7 +153,12 @@ public CompletableFuture createTable(TableDescriptor desc, byte[][] splitK @Override public CompletableFuture modifyTable(TableDescriptor desc) { - return wrap(rawAdmin.modifyTable(desc)); + return modifyTable(desc, true); + } + + @Override + public CompletableFuture modifyTable(TableDescriptor desc, boolean reopenRegions) { + return wrap(rawAdmin.modifyTable(desc, reopenRegions)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 92a95d491f0d..103a64e520a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -694,9 +694,14 @@ private CompletableFuture createTable(TableName tableName, CreateTableRequ @Override public CompletableFuture modifyTable(TableDescriptor desc) { + return modifyTable(desc, true); + } + + @Override + public CompletableFuture modifyTable(TableDescriptor desc, boolean reopenRegions) { return this. procedureCall(desc.getTableName(), RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(), - ng.newNonce()), + ng.newNonce(), reopenRegions), (s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(), new ModifyTableProcedureBiConsumer(this, desc.getTableName())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index c29aacfc5ee1..377b46494633 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1108,12 +1108,14 @@ public static CreateTableRequest buildCreateTableRequest(final TableDescriptor t * @return a ModifyTableRequest */ public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, - final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { + final TableDescriptor tableDesc, final long nonceGroup, final long nonce, + final boolean reopenRegions) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); + builder.setReopenRegions(reopenRegions); return builder.build(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index f66f3b983668..b1e750f4d920 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -204,6 +204,7 @@ message ModifyTableRequest { required TableSchema table_schema = 2; optional uint64 nonce_group = 3 [default = 0]; optional uint64 nonce = 4 [default = 0]; + optional bool reopen_regions = 5 [default = true]; 
} message ModifyTableResponse { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 7d5ed9d714ec..6c5501c9d0d6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -82,6 +82,7 @@ message ModifyTableStateData { required TableSchema modified_table_schema = 3; required bool delete_column_family_in_modify = 4; optional bool should_check_descriptor = 5; + optional bool reopen_regions = 6; } enum TruncateTableState { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3c433f11a689..0dca3a0111e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1164,7 +1164,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(replicasNumInConf).build(), - null, metaDesc, false)); + null, metaDesc, false, true)); } } } @@ -2794,6 +2794,13 @@ protected String getDescription() { private long modifyTable(final TableName tableName, final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, final boolean shouldCheckDescriptor) throws IOException { + return modifyTable(tableName, newDescriptorGetter, nonceGroup, nonce, shouldCheckDescriptor, + true); + } + + private long modifyTable(final TableName tableName, + final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, + final boolean shouldCheckDescriptor, final boolean reopenRegions) throws IOException { return MasterProcedureUtil .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override @@ -2812,7 +2819,7 @@ protected void run() throws IOException { // checks. This will block only the beginning of the procedure. See HBASE-19953. 
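The compatibility story above has two halves: on the wire, reopen_regions is an optional field with default = true, so requests from older clients behave exactly as before; in the Java interfaces, the old signatures survive as default methods that forward with reopenRegions = true. A minimal sketch of the latter pattern (types simplified, not the real MasterServices signature):

interface TableModifier {
  // Existing callers keep compiling and keep the old semantics.
  default long modifyTable(String tableName, long nonceGroup, long nonce) throws Exception {
    return modifyTable(tableName, nonceGroup, nonce, true);
  }

  // New callers opt in explicitly to skipping the region reopen.
  long modifyTable(String tableName, long nonceGroup, long nonce, boolean reopenRegions)
    throws Exception;
}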
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), - newDescriptor, latch, oldDescriptor, shouldCheckDescriptor)); + newDescriptor, latch, oldDescriptor, shouldCheckDescriptor, reopenRegions)); latch.await(); getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, @@ -2829,14 +2836,14 @@ protected String getDescription() { @Override public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor, - final long nonceGroup, final long nonce) throws IOException { + final long nonceGroup, final long nonce, final boolean reopenRegions) throws IOException { checkInitialized(); return modifyTable(tableName, new TableDescriptorGetter() { @Override public TableDescriptor get() throws IOException { return newDescriptor; } - }, nonceGroup, nonce, false); + }, nonceGroup, nonce, false, reopenRegions); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 9ff69d436469..6d330d6eb791 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1544,7 +1544,8 @@ public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequ throws ServiceException { try { long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()), - ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce()); + ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce(), + req.getReopenRegions()); return ModifyTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 2a244cb3aa47..b5e25bb44f33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -158,8 +158,19 @@ public long truncateTable(final TableName tableName, final boolean preserveSplit * @param tableName The table name * @param descriptor The updated table descriptor */ + default long modifyTable(final TableName tableName, final TableDescriptor descriptor, + final long nonceGroup, final long nonce) throws IOException { + return modifyTable(tableName, descriptor, nonceGroup, nonce, true); + } + + /** + * Modify the descriptor of an existing table + * @param tableName The table name + * @param descriptor The updated table descriptor + * @param reopenRegions Whether to reopen regions after modifying the table descriptor + */ long modifyTable(final TableName tableName, final TableDescriptor descriptor, - final long nonceGroup, final long nonce) throws IOException; + final long nonceGroup, final long nonce, final boolean reopenRegions) throws IOException; /** * Modify the store file tracker of an existing table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 28f955126bcd..ff0d7d2cc94b 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Supplier; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; @@ -56,6 +58,7 @@ public class ModifyTableProcedure extends AbstractStateMachineTableProcedure s = new HashSet<>(Arrays.asList(TableDescriptorBuilder.REGION_REPLICATION, + TableDescriptorBuilder.REGION_MEMSTORE_REPLICATION, RSGroupInfo.TABLE_DESC_PROP_GROUP)); + for (String k : s) { + if ( + isTablePropertyModified(this.unmodifiedTableDescriptor, this.modifiedTableDescriptor, k) + ) { + throw new HBaseIOException( + "Can not modify " + k + " of a table when modification won't reopen regions"); + } + } + } + } + + /** + * Comparing the value associated with a given key across two TableDescriptor instances' + * properties. + * @return True if the table property key is the same in both. + */ + private boolean isTablePropertyModified(TableDescriptor oldDescriptor, + TableDescriptor newDescriptor, String key) { + String oldV = oldDescriptor.getValue(key); + String newV = newDescriptor.getValue(key); + if (oldV == null && newV == null) { + return false; + } else if (oldV != null && newV != null && oldV.equals(newV)) { + return false; + } + return true; } private void initialize(final TableDescriptor unmodifiedTableDescriptor, @@ -125,7 +183,13 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_PRE_OPERATION: preModify(env, state); - setNextState(ModifyTableState.MODIFY_TABLE_CLOSE_EXCESS_REPLICAS); + // We cannot allow changes to region replicas when 'reopenRegions==false', + // as this mode bypasses the state management required for modifying region replicas. 
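A simplified, standalone sketch of the guard introduced above: when the modification will not reopen regions, any change to a key that all regions must agree on is rejected up front. The key set is passed in here for brevity; the patch itself guards REGION_REPLICATION, REGION_MEMSTORE_REPLICATION and the rsgroup table property:

import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.client.TableDescriptor;

final class NoReopenGuard {
  static void check(TableDescriptor oldTd, TableDescriptor newTd, Set<String> guardedKeys)
    throws HBaseIOException {
    for (String key : guardedKeys) {
      // Objects.equals handles the null/unset cases the same way isTablePropertyModified does.
      if (!Objects.equals(oldTd.getValue(key), newTd.getValue(key))) {
        throw new HBaseIOException(
          "Can not modify " + key + " of a table when modification won't reopen regions");
      }
    }
  }
}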
+ if (reopenRegions) { + setNextState(ModifyTableState.MODIFY_TABLE_CLOSE_EXCESS_REPLICAS); + } else { + setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR); + } break; case MODIFY_TABLE_CLOSE_EXCESS_REPLICAS: if (isTableEnabled(env)) { @@ -135,7 +199,11 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR: updateTableDescriptor(env); - setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); + if (reopenRegions) { + setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); + } else { + setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); + } break; case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: removeReplicaColumnsIfNeeded(env); @@ -143,7 +211,11 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_POST_OPERATION: postModify(env, state); - setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); + if (reopenRegions) { + setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); + } else { + return Flow.NO_MORE_STATE; + } break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: if (isTableEnabled(env)) { @@ -238,7 +310,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)) .setDeleteColumnFamilyInModify(deleteColumnFamilyInModify) - .setShouldCheckDescriptor(shouldCheckDescriptor); + .setShouldCheckDescriptor(shouldCheckDescriptor).setReopenRegions(reopenRegions); if (unmodifiedTableDescriptor != null) { modifyTableMsg @@ -260,6 +332,7 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify(); shouldCheckDescriptor = modifyTableMsg.hasShouldCheckDescriptor() ? modifyTableMsg.getShouldCheckDescriptor() : false; + reopenRegions = modifyTableMsg.hasReopenRegions() ? 
modifyTableMsg.getReopenRegions() : true; if (modifyTableMsg.hasUnmodifiedTableSchema()) { unmodifiedTableDescriptor = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 7faa7750cdff..0c4f3d7db266 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -263,6 +263,12 @@ public long modifyTable(final TableName tableName, final TableDescriptor descrip return -1; } + @Override + public long modifyTable(TableName tableName, TableDescriptor descriptor, long nonceGroup, + long nonce, boolean reopenRegions) throws IOException { + return -1; + } + @Override public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index 51be6385319f..c8043b8ee3b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -25,18 +25,22 @@ import org.apache.hadoop.hbase.ConcurrentTableModificationException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.StepHook; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; @@ -584,4 +588,114 @@ public void run() { t2.join(); assertFalse("Expected ConcurrentTableModificationException.", (t1.exception || t2.exception)); } + + @Test + public void testModifyWillNotReopenRegions() throws IOException { + final boolean reopenRegions = false; + final TableName tableName = TableName.valueOf(name.getMethodName()); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf"); + + // Test 1: Modify table without reopening any regions + TableDescriptor htd = UTIL.getAdmin().getDescriptor(tableName); + TableDescriptor modifiedDescriptor = TableDescriptorBuilder.newBuilder(htd) + .setValue("test" + ".hbase.conf", "test.hbase.conf.value").build(); + long procId1 = 
ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1)); + TableDescriptor currentHtd = UTIL.getAdmin().getDescriptor(tableName); + assertEquals("test.hbase.conf.value", currentHtd.getValue("test.hbase.conf")); + // Regions should not aware of any changes. + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + Assert.assertNull(r.getTableDescriptor().getValue("test.hbase.conf")); + } + // Force regions to reopen + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + getMaster().getAssignmentManager().move(r.getRegionInfo()); + } + // After the regions reopen, ensure that the configuration is updated. + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + assertEquals("test.hbase.conf.value", r.getTableDescriptor().getValue("test.hbase.conf")); + } + + // Test 2: Modifying region replication is not allowed + htd = UTIL.getAdmin().getDescriptor(tableName); + long oldRegionReplication = htd.getRegionReplication(); + modifiedDescriptor = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(3).build(); + try { + ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + Assert.fail( + "An exception should have been thrown while modifying region replication properties."); + } catch (HBaseIOException e) { + assertTrue(e.getMessage().contains("Can not modify")); + } + currentHtd = UTIL.getAdmin().getDescriptor(tableName); + // Nothing changed + assertEquals(oldRegionReplication, currentHtd.getRegionReplication()); + + // Test 3: Adding CFs is not allowed + htd = UTIL.getAdmin().getDescriptor(tableName); + modifiedDescriptor = TableDescriptorBuilder.newBuilder(htd) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder("NewCF".getBytes()).build()) + .build(); + try { + ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + Assert.fail("Should have thrown an exception while modifying CF!"); + } catch (HBaseIOException e) { + assertTrue(e.getMessage().contains("Cannot add or remove column families")); + } + currentHtd = UTIL.getAdmin().getDescriptor(tableName); + Assert.assertNull(currentHtd.getColumnFamily("NewCF".getBytes())); + + // Test 4: Modifying CF property is allowed + htd = UTIL.getAdmin().getDescriptor(tableName); + modifiedDescriptor = + TableDescriptorBuilder + .newBuilder(htd).modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder("cf".getBytes()).setCompressionType(Compression.Algorithm.SNAPPY).build()) + .build(); + ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + Assert.assertEquals(Compression.Algorithm.NONE, + r.getTableDescriptor().getColumnFamily("cf".getBytes()).getCompressionType()); + } + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + getMaster().getAssignmentManager().move(r.getRegionInfo()); + } + for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) { + Assert.assertEquals(Compression.Algorithm.SNAPPY, + r.getTableDescriptor().getColumnFamily("cf".getBytes()).getCompressionType()); + } + + // Test 5: Modifying coprocessor is not 
allowed + htd = UTIL.getAdmin().getDescriptor(tableName); + modifiedDescriptor = + TableDescriptorBuilder.newBuilder(htd).setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder("any.coprocessor.name").setJarPath("fake/path").build()).build(); + try { + ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + Assert.fail("Should have thrown an exception while modifying coprocessor!"); + } catch (HBaseIOException e) { + assertTrue(e.getMessage().contains("Can not modify Coprocessor")); + } + currentHtd = UTIL.getAdmin().getDescriptor(tableName); + assertEquals(0, currentHtd.getCoprocessorDescriptors().size()); + + // Test 6: Modifying is not allowed + htd = UTIL.getAdmin().getDescriptor(tableName); + modifiedDescriptor = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(3).build(); + try { + ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure( + procExec.getEnvironment(), modifiedDescriptor, null, htd, false, reopenRegions)); + Assert.fail("Should have thrown an exception while modifying coprocessor!"); + } catch (HBaseIOException e) { + System.out.println(e.getMessage()); + assertTrue(e.getMessage().contains("Can not modify REGION_REPLICATION")); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index f98a1c78ad5f..35c868413e19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -423,7 +423,12 @@ public Future truncateRegionAsync(byte[] regionName) throws IOException { } public Future modifyTableAsync(TableDescriptor td) throws IOException { - return admin.modifyTableAsync(td); + return modifyTableAsync(td, true); + } + + public Future modifyTableAsync(TableDescriptor td, boolean reopenRegions) + throws IOException { + return admin.modifyTableAsync(td, reopenRegions); } public void shutdown() throws IOException { diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 453b7ae1af6e..5c6d778bcf37 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -784,6 +784,7 @@ def alter(table_name_str, wait = true, *args) # Get table descriptor tdb = TableDescriptorBuilder.newBuilder(@admin.getDescriptor(table_name)) hasTableUpdate = false + reopen_regions = true # Process all args args.each do |arg| @@ -793,6 +794,14 @@ def alter(table_name_str, wait = true, *args) # Normalize args to support shortcut delete syntax arg = { METHOD => 'delete', NAME => arg['delete'] } if arg['delete'] + if arg.key?(REOPEN_REGIONS) + if !['true', 'false'].include?(arg[REOPEN_REGIONS].downcase) + raise(ArgumentError, "Invalid 'REOPEN_REGIONS' for non-boolean value.") + end + reopen_regions = JBoolean.valueOf(arg[REOPEN_REGIONS]) + arg.delete(REOPEN_REGIONS) + end + # There are 3 possible options. # 1) Column family spec. Distinguished by having a NAME and no METHOD. method = arg.delete(METHOD) @@ -916,9 +925,13 @@ def alter(table_name_str, wait = true, *args) # Bulk apply all table modifications. 
if hasTableUpdate - future = @admin.modifyTableAsync(tdb.build) - - if wait == true + future = @admin.modifyTableAsync(tdb.build, reopen_regions) + if reopen_regions == false + puts("WARNING: You are using REOPEN_REGIONS => 'false' to modify a table, which will + result in inconsistencies in the configuration of online regions and other risks. If you + encounter any issues, use the original 'alter' command to make the modification again!") + future.get + elsif wait == true puts 'Updating all regions with the new schema...' future.get end diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index 5f994c7b5ae0..d4df1f8f5821 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -107,6 +107,7 @@ module HBaseConstants VALUE = 'VALUE'.freeze VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS VISIBILITY = 'VISIBILITY'.freeze + REOPEN_REGIONS = 'REOPEN_REGIONS'.freeze # aliases ENDKEY = STOPROW diff --git a/hbase-shell/src/main/ruby/shell/commands/alter.rb b/hbase-shell/src/main/ruby/shell/commands/alter.rb index ad0cb5a5a49b..18ec24be7be6 100644 --- a/hbase-shell/src/main/ruby/shell/commands/alter.rb +++ b/hbase-shell/src/main/ruby/shell/commands/alter.rb @@ -72,6 +72,27 @@ def help hbase> alter 't1', CONFIGURATION => {'hbase.hregion.scan.loadColumnFamiliesOnDemand' => 'true'} hbase> alter 't1', {NAME => 'f2', CONFIGURATION => {'hbase.hstore.blockingStoreFiles' => '10'}} +You can also set configuration setting with REOPEN_REGIONS=>'false' to avoid regions RIT, which +let the modification take effect after regions was reopened (Be careful, the regions of the table +may be configured inconsistently If regions are not reopened after the modification) + + hbase> alter 't1', REOPEN_REGIONS => 'false', MAX_FILESIZE => '134217728' + hbase> alter 't1', REOPEN_REGIONS => 'false', CONFIGURATION => {'hbase.hregion.scan + .loadColumnFamiliesOnDemand' => 'true'} + +However, be aware that: +1. Inconsistency Risks: If the regions are not reopened after the modification, the table's regions +may become inconsistently configured. Ensure that you manually reopen the regions as soon as +possible to apply the changes consistently across the entire table. +2. If changes are made to the table without reopening the regions, we currently only allow +lightweight operations. The following types of changes, which may lead to unknown situations, +will throw an exception: + a. Adding or removing CFs, coprocessors. + b. Modifying the table name. + c. Changing region replica related configurations such as 'REGION_REPLICATION' + and 'REGION_MEMSTORE_REPLICATION'. + d. Changing the rsgroup. 
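From the Java API side, the restrictions listed in items a-d above surface as a failed modification. A hedged sketch (the exact exception type and wrapping seen by the client are not spelled out in this patch, so the catch below is deliberately broad):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class GuardedChangeExample {
  static boolean tryChangeReplication(Admin admin, TableName table) throws Exception {
    TableDescriptor current = admin.getDescriptor(table);
    TableDescriptor updated =
      TableDescriptorBuilder.newBuilder(current).setRegionReplication(3).build();
    try {
      admin.modifyTableAsync(updated, false).get();
      return true;  // not expected: the ModifyTableProcedure guard rejects this combination
    } catch (Exception e) {
      return false; // rejected, as the help text above documents
    }
  }
}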
+ You can also unset configuration settings specific to this table: hbase> alter 't1', METHOD => 'table_conf_unset', NAME => 'hbase.hregion.majorcompaction' diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index c88572b9f3c7..0eff84bba7c8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -745,6 +745,11 @@ public Future modifyTableAsync(TableDescriptor td) { throw new NotImplementedException("modifyTableAsync not supported in ThriftAdmin"); } + @Override + public Future modifyTableAsync(TableDescriptor td, boolean reopenRegions) { + throw new NotImplementedException("modifyTableAsync not supported in ThriftAdmin"); + } + @Override public void shutdown() { throw new NotImplementedException("shutdown not supported in ThriftAdmin"); diff --git a/pom.xml b/pom.xml index aedb08077d56..28f57084c97a 100644 --- a/pom.xml +++ b/pom.xml @@ -2817,6 +2817,7 @@ **/generated/* **/package-info.java + **/.idea/** From 8821c6aac2aaffdf947538b87b1fd901849ba40f Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Thu, 9 Nov 2023 09:58:38 +0000 Subject: [PATCH 133/514] HBASE-28189 Fix the miss count in one of CombinedBlockCache getBlock implementations (#5506) Signed-off-by: Peter Somogyi --- .../hbase/io/hfile/CombinedBlockCache.java | 61 ++++++++++++++++--- .../io/hfile/TestCombinedBlockCache.java | 48 ++++++++++++++- 2 files changed, 99 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 57c103562d70..a421dfc83aa0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and @@ -38,6 +40,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { protected final BlockCache l2Cache; protected final CombinedCacheStats combinedCacheStats; + private static final Logger LOG = LoggerFactory.getLogger(CombinedBlockCache.class); + public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) { this.l1Cache = l1Cache; this.l2Cache = l2Cache; @@ -77,16 +81,49 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) { - // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting - // passed always. + Cacheable block = null; + // We don't know the block type. We should try to get it on one of the caches only, + // but not both otherwise we'll over compute on misses. Here we check if the key is on L1, + // if so, call getBlock on L1 and that will compute the hit. Otherwise, we'll try to get it from + // L2 and whatever happens, we'll update the stats there. 
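A toy sketch of the accounting rule described in the comment above: consult exactly one tier per lookup and charge the hit or miss to that tier, instead of counting an L1 miss on every lookup that ends up going to L2. The maps and counters below stand in for the real caches and CacheStats:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class TwoTierLookup<K, V> {
  final Map<K, V> l1 = new ConcurrentHashMap<>();
  final Map<K, V> l2 = new ConcurrentHashMap<>();
  final LongAdder l1Hits = new LongAdder(), l1Misses = new LongAdder();
  final LongAdder l2Hits = new LongAdder(), l2Misses = new LongAdder();

  V get(K key) {
    if (l1.containsKey(key)) {
      // The key claims to live in L1, so only L1's stats are touched.
      V v = l1.get(key);
      if (v != null) l1Hits.increment(); else l1Misses.increment();
      return v;
    }
    // Otherwise go straight to L2 and account the outcome there.
    V v = l2.get(key);
    if (v != null) l2Hits.increment(); else l2Misses.increment();
    return v;
  }
}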
boolean existInL1 = l1Cache.containsBlock(cacheKey); - if (!existInL1 && updateCacheMetrics && !repeat) { - // If the block does not exist in L1, the containsBlock should be counted as one miss. - l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + // if we know it's in L1, just delegate call to l1 and return it + if (existInL1) { + block = l1Cache.getBlock(cacheKey, caching, repeat, false); + } else { + block = l2Cache.getBlock(cacheKey, caching, repeat, false); + } + if (updateCacheMetrics) { + boolean metaBlock = isMetaBlock(cacheKey.getBlockType()); + if (metaBlock) { + if (!existInL1 && block != null) { + LOG.warn("Cache key {} had block type {}, but was found in L2 cache.", cacheKey, + cacheKey.getBlockType()); + updateBlockMetrics(block, cacheKey, l2Cache, caching); + } else { + updateBlockMetrics(block, cacheKey, l1Cache, caching); + } + } else { + if (existInL1) { + LOG.warn("Cache key {} had block type {}, but was found in L1 cache.", cacheKey, + cacheKey.getBlockType()); + updateBlockMetrics(block, cacheKey, l1Cache, caching); + } else { + updateBlockMetrics(block, cacheKey, l2Cache, caching); + } + } + } + return block; + } + + private void updateBlockMetrics(Cacheable block, BlockCacheKey key, BlockCache cache, + boolean caching) { + if (block == null) { + cache.getStats().miss(caching, key.isPrimary(), key.getBlockType()); + } else { + cache.getStats().hit(caching, key.isPrimary(), key.getBlockType()); + } - return existInL1 - ? l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics) - : l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override @@ -95,7 +132,13 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea if (blockType == null) { return getBlock(cacheKey, caching, repeat, updateCacheMetrics); } - boolean metaBlock = isMetaBlock(blockType); + cacheKey.setBlockType(blockType); + return getBlockWithType(cacheKey, caching, repeat, updateCacheMetrics); + } + + private Cacheable getBlockWithType(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { + boolean metaBlock = isMetaBlock(cacheKey.getBlockType()); if (metaBlock) { return l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java index 2a839ea91212..b9bca1ba6b4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java @@ -19,12 +19,16 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; +import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; import static org.junit.Assert.assertEquals; +import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache.CombinedCacheStats; +import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; import org.junit.ClassRule; @@ -110,11 +114,53 @@ public void testCombinedCacheStats() { @Test public void testMultiThreadGetAndEvictBlock() 
throws Exception { + BlockCache blockCache = createCombinedBlockCache(); + TestLruBlockCache.testMultiThreadGetAndEvictBlockInternal(blockCache); + } + + @Test + public void testCombinedBlockCacheStatsWithDataBlockType() throws Exception { + testCombinedBlockCacheStats(BlockType.DATA, 0, 1); + } + + @Test + public void testCombinedBlockCacheStatsWithMetaBlockType() throws Exception { + testCombinedBlockCacheStats(BlockType.META, 1, 0); + } + + @Test + public void testCombinedBlockCacheStatsWithNoBlockType() throws Exception { + testCombinedBlockCacheStats(null, 0, 1); + } + + private CombinedBlockCache createCombinedBlockCache() { Configuration conf = UTIL.getConfiguration(); conf.set(BUCKET_CACHE_IOENGINE_KEY, "offheap"); conf.setInt(BUCKET_CACHE_SIZE_KEY, 32); BlockCache blockCache = BlockCacheFactory.createBlockCache(conf); Assert.assertTrue(blockCache instanceof CombinedBlockCache); - TestLruBlockCache.testMultiThreadGetAndEvictBlockInternal(blockCache); + return (CombinedBlockCache) blockCache; + } + + public void testCombinedBlockCacheStats(BlockType type, int expectedL1Miss, int expectedL2Miss) + throws Exception { + CombinedBlockCache blockCache = createCombinedBlockCache(); + BlockCacheKey key = new BlockCacheKey("key1", 0, false, type); + int size = 100; + int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; + byte[] byteArr = new byte[length]; + HFileContext meta = new HFileContextBuilder().build(); + HFileBlock blk = new HFileBlock(type != null ? type : BlockType.DATA, size, size, -1, + ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, + HEAP); + blockCache.cacheBlock(key, blk); + blockCache.getBlock(key, true, false, true); + assertEquals(0, blockCache.getStats().getMissCount()); + blockCache.evictBlock(key); + blockCache.getBlock(key, true, false, true); + assertEquals(1, blockCache.getStats().getMissCount()); + assertEquals(expectedL1Miss, blockCache.getFirstLevelCache().getStats().getMissCount()); + assertEquals(expectedL2Miss, blockCache.getSecondLevelCache().getStats().getMissCount()); } + } From 5dc4467e6ca4db057f0c3d26287e4190a591cfea Mon Sep 17 00:00:00 2001 From: jbewing Date: Thu, 9 Nov 2023 09:14:02 -0500 Subject: [PATCH 134/514] HBASE-28043 Reduce seeks from beginning of block in StoreFileScanner.seekToPreviousRow (#5373) Signed-off-by: Bryan Beaudreault Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/PerformanceEvaluation.java | 45 ++++ .../hbase/regionserver/StoreFileReader.java | 4 +- .../hbase/regionserver/StoreFileScanner.java | 240 +++++++++++++++--- .../hbase/regionserver/TestHStoreFile.java | 4 +- 4 files changed, 251 insertions(+), 42 deletions(-) diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 8fd2d5f7fb26..e0040c1f178a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -185,6 +185,8 @@ public class PerformanceEvaluation extends Configured implements Tool { addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); + addCommandDescriptor(ReverseScanTest.class, "reverseScan", + "Run reverse scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", "Run scan test 
using a filter to find a specific row based on it's value " + "(make sure to use --rows=20)"); @@ -2104,6 +2106,49 @@ boolean testRow(final int i, final long startTime) throws IOException { } } + static class ReverseScanTest extends TableTest { + private ResultScanner testScanner; + + ReverseScanTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + @Override + void testTakedown() throws IOException { + if (this.testScanner != null) { + this.testScanner.close(); + } + super.testTakedown(); + } + + @Override + boolean testRow(final int i, final long startTime) throws IOException { + if (this.testScanner == null) { + Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) + .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) + .setScanMetricsEnabled(true).setReversed(true); + for (int family = 0; family < opts.families; family++) { + byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); + if (opts.addColumns) { + for (int column = 0; column < opts.columns; column++) { + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); + scan.addColumn(familyName, qualifier); + } + } else { + scan.addFamily(familyName); + } + } + if (opts.filterAll) { + scan.setFilter(new FilterAllFilter()); + } + this.testScanner = table.getScanner(scan); + } + Result r = testScanner.next(); + updateValueSize(r); + return true; + } + } + /** * Base class for operations that are CAS-like; that read a value and then set it based off what * they read. In this category is increment, append, checkAndPut, etc. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 72e93c3f75a4..09c379227bda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.BloomFilterMetrics; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -146,7 +147,8 @@ public CellComparator getComparator() { public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction, - reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn, + reader.getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 5e666659c025..fd941de4df87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -61,6 +61,13 @@ public class StoreFileScanner implements KeyValueScanner { // A flag represents whether could stop skipping KeyValues for MVCC // if have encountered the next row. 
Only used for reversed scan private boolean stopSkippingKVsIfNextRow = false; + // A Cell that represents the row before the most previously seeked to row in seekToPreviousRow + private Cell previousRow = null; + // Whether the underlying HFile is using a data block encoding that has lower cost for seeking to + // a row from the beginning of a block (i.e. RIV1). If the data block encoding has a high cost for + // seeks, then we can use a modified reverse scanning algorithm to reduce seeks from the beginning + // of the block + private final boolean isFastSeekingEncoding; private static LongAdder seekCount; @@ -83,9 +90,13 @@ public class StoreFileScanner implements KeyValueScanner { * {@link KeyValueScanner#getScannerOrder()}. * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column, * otherwise {@code false}. This is a hint for optimization. + * @param isFastSeekingEncoding {@code true} if the data block encoding can seek quickly + * from the beginning of a block (i.e. RIV1), otherwise + * {@code false}. This is a hint for optimization. */ public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC, - boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn, + boolean isFastSeekingEncoding) { this.readPt = readPt; this.reader = reader; this.hfs = hfs; @@ -93,6 +104,7 @@ public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVC this.hasMVCCInfo = hasMVCC; this.scannerOrder = scannerOrder; this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn; + this.isFastSeekingEncoding = isFastSeekingEncoding; this.reader.incrementRefCount(); } @@ -226,6 +238,7 @@ public boolean seek(Cell key) throws IOException { } } finally { realSeekDone = true; + previousRow = null; } } catch (FileNotFoundException e) { throw e; @@ -253,6 +266,7 @@ public boolean reseek(Cell key) throws IOException { } } finally { realSeekDone = true; + previousRow = null; } } catch (FileNotFoundException e) { throw e; @@ -486,50 +500,198 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) @Override public boolean seekToPreviousRow(Cell originalKey) throws IOException { try { - try { - boolean keepSeeking = false; - Cell key = originalKey; - do { - Cell seekKey = PrivateCellUtil.createFirstOnRow(key); - if (seekCount != null) seekCount.increment(); - if (!hfs.seekBefore(seekKey)) { - this.cur = null; - return false; - } - Cell curCell = hfs.getCell(); - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(curCell); - - if (seekCount != null) seekCount.increment(); - if (!seekAtOrAfter(hfs, firstKeyOfPreviousRow)) { - this.cur = null; - return false; - } - - setCurrentCell(hfs.getCell()); - this.stopSkippingKVsIfNextRow = true; - boolean resultOfSkipKVs; - try { - resultOfSkipKVs = skipKVsNewerThanReadpoint(); - } finally { - this.stopSkippingKVsIfNextRow = false; - } - if (!resultOfSkipKVs || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { - keepSeeking = true; - key = firstKeyOfPreviousRow; - continue; - } else { - keepSeeking = false; - } - } while (keepSeeking); - return true; - } finally { - realSeekDone = true; + if (isFastSeekingEncoding) { + return seekToPreviousRowStateless(originalKey); + } else if (previousRow == null || getComparator().compareRows(previousRow, originalKey) > 0) { + return seekToPreviousRowWithoutHint(originalKey); + } else { + return 
seekToPreviousRowWithHint(); } } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { throw new IOException("Could not seekToPreviousRow " + this + " to key " + originalKey, ioe); + } finally { + this.realSeekDone = true; + } + } + + /** + * This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires one seek + * and one reseek. This method maintains state in {@link StoreFileScanner#previousRow} which only + * makes sense in the context of a sequential row-by-row reverse scan. + * {@link StoreFileScanner#previousRow} should be reset if that is not the case. The reasoning for + * why this method is faster than {@link StoreFileScanner#seekToPreviousRowStateless(Cell)} is + * that seeks are slower as they need to start from the beginning of the file, while reseeks go + * forward from the current position. + */ + private boolean seekToPreviousRowWithHint() throws IOException { + do { + // Using our existing seek hint, set our next seek hint + Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow); + seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); + + // Reseek back to our initial seek hint (i.e. what we think is the start of the + // previous row) + if (!reseekAtOrAfter(firstKeyOfPreviousRow)) { + return false; + } + + // If after skipping newer Kvs, we're still in our seek hint row, then we're finished + if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) { + return true; + } + + // If the previousRow seek hint is missing, that means that we're at row after the first row + // in the storefile. Use the without-hint seek path to process the final row + if (previousRow == null) { + return seekToPreviousRowWithoutHint(firstKeyOfPreviousRow); + } + + // Otherwise, use the previousRow seek hint to continue traversing backwards + } while (true); + } + + /** + * This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks + * and one reseek. The extra expense/seek is with the intent of speeding up subsequent calls by + * using the {@link StoreFileScanner#seekToPreviousRowWithHint} which this method seeds the state + * for by setting {@link StoreFileScanner#previousRow} + */ + private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOException { + // Rewind to the cell before the beginning of this row + Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey); + if (!seekBefore(keyAtBeginningOfRow)) { + return false; + } + + // Rewind before this row and save what we find as a seek hint + Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); + + // Seek back to the start of the previous row + if (!reseekAtOrAfter(firstKeyOfPreviousRow)) { + return false; + } + + // If after skipping newer Kvs, we're still in what we thought was the previous + // row, then we can exit + if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) { + return true; + } + + // Skipping newer kvs resulted in skipping the entire row that we thought was the + // previous row. If we've set a seek hint, then we can use that to go backwards + // further + if (previousRow != null) { + return seekToPreviousRowWithHint(); + } + + // If we've made it here, then we weren't able to set a seek hint. This can happen + // only if we're at the beginning of the storefile i.e. 
there is no row before this + // one + return false; + } + + /** + * This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks. + * It should be used if the cost for seeking is lower i.e. when using a fast seeking data block + * encoding like RIV1. + */ + private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException { + Cell key = originalKey; + do { + Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key); + if (!seekBefore(keyAtBeginningOfRow)) { + return false; + } + + Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + if (!seekAtOrAfter(firstKeyOfPreviousRow)) { + return false; + } + + if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) { + return true; + } + key = firstKeyOfPreviousRow; + } while (true); + } + + private boolean seekBefore(Cell seekKey) throws IOException { + if (seekCount != null) { + seekCount.increment(); } + if (!hfs.seekBefore(seekKey)) { + this.cur = null; + return false; + } + + return true; + } + + /** + * Seeks before the seek target cell and saves the location to {@link #previousRow}. If there + * doesn't exist a KV in this file before the seek target cell, reposition the scanner at the + * beginning of the storefile (in preparation to a reseek at or after the seek key) and set the + * {@link #previousRow} to null. If {@link #previousRow} is ever non-null and then transitions to + * being null again via this method, that's because there doesn't exist a row before the seek + * target in the storefile (i.e. we're at the beginning of the storefile) + */ + private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException { + if (seekCount != null) { + seekCount.increment(); + } + if (!hfs.seekBefore(seekKey)) { + // Since the above seek failed, we need to position ourselves back at the start of the + // block or else our reseek might fail. 
seekTo() cannot return false here as at least + // one seekBefore will have returned true by the time we get here + hfs.seekTo(); + this.previousRow = null; + } else { + this.previousRow = hfs.getCell(); + } + } + + private boolean seekAtOrAfter(Cell seekKey) throws IOException { + if (seekCount != null) { + seekCount.increment(); + } + if (!seekAtOrAfter(hfs, seekKey)) { + this.cur = null; + return false; + } + + return true; + } + + private boolean reseekAtOrAfter(Cell seekKey) throws IOException { + if (seekCount != null) { + seekCount.increment(); + } + if (!reseekAtOrAfter(hfs, seekKey)) { + this.cur = null; + return false; + } + + return true; + } + + private boolean isStillAtSeekTargetAfterSkippingNewerKvs(Cell seekKey) throws IOException { + setCurrentCell(hfs.getCell()); + return skipKvsNewerThanReadpointReversed() && getComparator().compareRows(cur, seekKey) <= 0; + } + + private boolean skipKvsNewerThanReadpointReversed() throws IOException { + this.stopSkippingKVsIfNextRow = true; + boolean resultOfSkipKVs; + try { + resultOfSkipKVs = skipKVsNewerThanReadpoint(); + } finally { + this.stopSkippingKVsIfNextRow = false; + } + + return resultOfSkipKVs; } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index a0c23af5ef0d..aa7fb53566df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -280,7 +280,7 @@ public void testStoreFileReference() throws Exception { StoreFileReader r = file.getReader(); assertNotNull(r); StoreFileScanner scanner = - new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false); + new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false, false); // Verify after instantiating scanner refCount is increased assertTrue("Verify file is being referenced", file.isReferencedInReads()); @@ -297,7 +297,7 @@ public void testEmptyStoreFileRestrictKeyRanges() throws Exception { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf); when(store.getColumnFamilyDescriptor()).thenReturn(cfd); try (StoreFileScanner scanner = - new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) { + new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true, false)) { Scan scan = new Scan(); scan.setColumnFamilyTimeRange(cf, 0, 1); assertFalse(scanner.shouldUseScanner(scan, store, 0)); From 7f3921ae40be4e89d7b9919318906a9382045d65 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Thu, 9 Nov 2023 12:59:50 -0500 Subject: [PATCH 135/514] HBASE-28175 Deep copy RpcLogDetails' param field (#5481) Signed-off-by: Viraj Jasani Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/client/SlowLogParams.java | 2 +- .../hbase/namequeues/RpcLogDetails.java | 18 +- .../hbase/namequeues/TestRpcLogDetails.java | 265 ++++++++++++++++++ 3 files changed, 282 insertions(+), 3 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java index 6af7c42c26dd..92405fbc06b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java @@ -82,7 +82,7 @@ public boolean equals(Object o) { } SlowLogParams that = (SlowLogParams) o; return new EqualsBuilder().append(regionName, that.regionName).append(params, that.params) - .append("scan", scan).isEquals(); + .append(scan, that.scan).isEquals(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java index eb35d886bbb0..235d82302d64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java @@ -21,7 +21,10 @@ import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** @@ -32,8 +35,10 @@ public class RpcLogDetails extends NamedQueuePayload { public static final int SLOW_LOG_EVENT = 0; + private static final Logger LOG = LoggerFactory.getLogger(RpcLogDetails.class.getName()); + private final RpcCall rpcCall; - private final Message param; + private Message param; private final String clientAddress; private final long responseSize; private final long blockBytesScanned; @@ -47,7 +52,6 @@ public RpcLogDetails(RpcCall rpcCall, Message param, String clientAddress, long long blockBytesScanned, String className, boolean isSlowLog, boolean isLargeLog) { super(SLOW_LOG_EVENT); this.rpcCall = rpcCall; - this.param = param; this.clientAddress = clientAddress; this.responseSize = responseSize; this.blockBytesScanned = blockBytesScanned; @@ -60,6 +64,16 @@ public RpcLogDetails(RpcCall rpcCall, Message param, String clientAddress, long // would result in corrupted attributes this.connectionAttributes = rpcCall.getConnectionAttributes(); this.requestAttributes = rpcCall.getRequestAttributes(); + + // We also need to deep copy the message because the CodedInputStream may be + // overwritten before this slow log is consumed. Such overwriting could + // cause the slow log payload to be corrupt + try { + this.param = param.newBuilderForType().mergeFrom(param.toByteArray()).build(); + } catch (InvalidProtocolBufferException e) { + LOG.error("Failed to parse protobuf for message {}", param, e); + this.param = param; + } } public RpcCall getRpcCall() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java new file mode 100644 index 000000000000..8a93f2d0ff54 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java @@ -0,0 +1,265 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.namequeues; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.io.IOException; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcCallback; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; +import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; + +@Category({ RegionServerTests.class, SmallTests.class }) +public class TestRpcLogDetails { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRpcLogDetails.class); + + private final ClientProtos.Scan scan = + ClientProtos.Scan.newBuilder().setStartRow(ByteString.copyFrom(Bytes.toBytes("abc"))) + .setStopRow(ByteString.copyFrom(Bytes.toBytes("xyz"))).build(); + private final ClientProtos.Scan otherScan = + ClientProtos.Scan.newBuilder().setStartRow(ByteString.copyFrom(Bytes.toBytes("def"))) + .setStopRow(ByteString.copyFrom(Bytes.toBytes("uvw"))).build(); + private final ClientProtos.ScanRequest scanRequest = ClientProtos.ScanRequest + .newBuilder(ClientProtos.ScanRequest.getDefaultInstance()).setScan(scan).build(); + private final ClientProtos.ScanRequest otherScanRequest = ClientProtos.ScanRequest + .newBuilder(ClientProtos.ScanRequest.getDefaultInstance()).setScan(otherScan).build(); + + @Test + public void itDeepCopiesRpcLogDetailsParams() throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(scanRequest.toByteArray().length); + CodedInputStream cis = UnsafeByteOperations.unsafeWrap(buffer).newCodedInput(); + cis.enableAliasing(true); + buffer.put(scanRequest.toByteArray()); + Message.Builder messageBuilder = ClientProtos.ScanRequest.newBuilder(); + ProtobufUtil.mergeFrom(messageBuilder, cis, buffer.capacity()); + Message message = messageBuilder.build(); + RpcLogDetails rpcLogDetails = + new 
RpcLogDetails(getRpcCall(message), message, null, 0L, 0L, null, true, false); + + // log's scan should be equal + ClientProtos.Scan logScan = ((ClientProtos.ScanRequest) rpcLogDetails.getParam()).getScan(); + assertEquals(logScan, scan); + + // ensure we have a different byte array for testing + assertFalse(Arrays.equals(scanRequest.toByteArray(), otherScanRequest.toByteArray())); + + // corrupt the underlying buffer + buffer.position(0); + buffer.put(otherScanRequest.toByteArray(), 0, otherScanRequest.toByteArray().length); + assertArrayEquals(otherScanRequest.toByteArray(), buffer.array()); + + // log scan should still be original scan + assertEquals(logScan, scan); + } + + @SuppressWarnings("checkstyle:methodlength") + private static RpcCall getRpcCall(Message message) { + RpcCall rpcCall = new RpcCall() { + @Override + public BlockingService getService() { + return null; + } + + @Override + public Descriptors.MethodDescriptor getMethod() { + return null; + } + + @Override + public Message getParam() { + return message; + } + + @Override + public CellScanner getCellScanner() { + return null; + } + + @Override + public long getReceiveTime() { + return 0; + } + + @Override + public long getStartTime() { + return 0; + } + + @Override + public void setStartTime(long startTime) { + } + + @Override + public int getTimeout() { + return 0; + } + + @Override + public int getPriority() { + return 0; + } + + @Override + public long getDeadline() { + return 0; + } + + @Override + public long getSize() { + return 0; + } + + @Override + public RPCProtos.RequestHeader getHeader() { + return null; + } + + @Override + public Map getConnectionAttributes() { + return Collections.emptyMap(); + } + + @Override + public Map getRequestAttributes() { + return Collections.emptyMap(); + } + + @Override + public byte[] getRequestAttribute(String key) { + return null; + } + + @Override + public int getRemotePort() { + return 0; + } + + @Override + public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, + String error) { + } + + @Override + public void sendResponseIfReady() throws IOException { + } + + @Override + public void cleanup() { + } + + @Override + public String toShortString() { + return null; + } + + @Override + public long disconnectSince() { + return 0; + } + + @Override + public boolean isClientCellBlockSupported() { + return false; + } + + @Override + public Optional getRequestUser() { + return null; + } + + @Override + public InetAddress getRemoteAddress() { + return null; + } + + @Override + public HBaseProtos.VersionInfo getClientVersionInfo() { + return null; + } + + @Override + public void setCallBack(RpcCallback callback) { + } + + @Override + public boolean isRetryImmediatelySupported() { + return false; + } + + @Override + public long getResponseCellSize() { + return 0; + } + + @Override + public void incrementResponseCellSize(long cellSize) { + } + + @Override + public long getBlockBytesScanned() { + return 0; + } + + @Override + public void incrementBlockBytesScanned(long blockSize) { + } + + @Override + public long getResponseExceptionSize() { + return 0; + } + + @Override + public void incrementResponseExceptionSize(long exceptionSize) { + } + }; + return rpcCall; + } + +} From 7151581f51faf53b1f85bf5ed687e4006485d4f9 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 10 Nov 2023 08:58:09 -0500 Subject: [PATCH 136/514] HBASE-27276 Reduce reflection overhead in Filter deserialization (#5488) Signed-off-by: Nick Dimiduk Signed-off-by: Duo Zhang --- 
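Note: illustrative sketch only, not part of the commit below. The change caches a
MethodHandle/LambdaMetafactory-backed Function per class name so that repeated
Filter and ByteArrayComparable deserialization avoids per-call Method.invoke.
Roughly, the new ReflectedFunctionCache API is used as sketched here; "className"
and "serializedFilter" are placeholder inputs, and the fallback path is simplified
to the default class loader rather than HBase's DynamicClassLoader:

    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.util.ReflectedFunctionCache;

    public class FilterDeserializationSketch {
      // I = byte[] (argument type), R = Filter (result type), static factory "parseFrom"
      private static final ReflectedFunctionCache<byte[], Filter> FILTERS =
          new ReflectedFunctionCache<>(Filter.class, byte[].class, "parseFrom");

      static Filter deserialize(String className, byte[] serializedFilter) throws Exception {
        // Fast path: one-time reflective lookup per class, then a plain Function.apply()
        Filter filter = FILTERS.getAndCallByName(className, serializedFilter);
        if (filter != null) {
          return filter;
        }
        // getAndCallByName returns null when the class cannot be resolved or bound
        // (e.g. filters loaded from hbase.dynamic.jars.dir), so callers keep a
        // classic reflection fallback; simplified here with the default class loader.
        return (Filter) Class.forName(className)
            .getMethod("parseFrom", byte[].class)
            .invoke(null, (Object) serializedFilter);
      }
    }
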
.../hbase/shaded/protobuf/ProtobufUtil.java | 50 +++++++- .../apache/hadoop/hbase/client/TestGet.java | 12 +- .../hbase/util/ReflectedFunctionCache.java | 110 ++++++++++++++++++ .../hadoop/hbase/util/ReflectionUtils.java | 30 +++++ .../hbase/util/ClassLoaderTestHelper.java | 17 +++ .../filter/TestComparatorSerialization.java | 81 +++++++++++++ .../hbase/filter/TestFilterSerialization.java | 81 +++++++++++++ .../CustomLoadedComparator.java.template | 66 +++++++++++ .../CustomLoadedFilter.java.template | 69 +++++++++++ 9 files changed, 505 insertions(+), 11 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectedFunctionCache.java create mode 100644 hbase-server/src/test/resources/CustomLoadedComparator.java.template create mode 100644 hbase-server/src/test/resources/CustomLoadedFilter.java.template diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d2e14df1c8d6..598ad932e679 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -127,6 +127,7 @@ import org.apache.hadoop.hbase.util.DynamicClassLoader; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Methods; +import org.apache.hadoop.hbase.util.ReflectedFunctionCache; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -306,6 +307,23 @@ public static boolean isClassLoaderLoaded() { return classLoaderLoaded; } + private static final String PARSE_FROM = "parseFrom"; + + // We don't bother using the dynamic CLASS_LOADER above, because currently we can't support + // optimizing dynamically loaded classes. We can do it once we build for java9+, see the todo + // in ReflectedFunctionCache + private static final ReflectedFunctionCache FILTERS = + new ReflectedFunctionCache<>(Filter.class, byte[].class, PARSE_FROM); + private static final ReflectedFunctionCache COMPARATORS = + new ReflectedFunctionCache<>(ByteArrayComparable.class, byte[].class, PARSE_FROM); + + private static volatile boolean ALLOW_FAST_REFLECTION_FALLTHROUGH = true; + + // Visible for tests + public static void setAllowFastReflectionFallthrough(boolean val) { + ALLOW_FAST_REFLECTION_FALLTHROUGH = val; + } + /** * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. 
@@ -1554,13 +1572,23 @@ public static ComparatorProtos.Comparator toComparator(ByteArrayComparable compa public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) throws IOException { String type = proto.getName(); - String funcName = "parseFrom"; byte[] value = proto.getSerializedComparator().toByteArray(); + try { + ByteArrayComparable result = COMPARATORS.getAndCallByName(type, value); + if (result != null) { + return result; + } + + if (!ALLOW_FAST_REFLECTION_FALLTHROUGH) { + throw new IllegalStateException("Failed to deserialize comparator " + type + + " because fast reflection returned null and fallthrough is disabled"); + } + Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); - Method parseFrom = c.getMethod(funcName, byte[].class); + Method parseFrom = c.getMethod(PARSE_FROM, byte[].class); if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); + throw new IOException("Unable to locate function: " + PARSE_FROM + " in type: " + type); } return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { @@ -1577,12 +1605,22 @@ public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); final byte[] value = proto.getSerializedFilter().toByteArray(); - String funcName = "parseFrom"; + try { + Filter result = FILTERS.getAndCallByName(type, value); + if (result != null) { + return result; + } + + if (!ALLOW_FAST_REFLECTION_FALLTHROUGH) { + throw new IllegalStateException("Failed to deserialize comparator " + type + + " because fast reflection returned null and fallthrough is disabled"); + } + Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); - Method parseFrom = c.getMethod(funcName, byte[].class); + Method parseFrom = c.getMethod(PARSE_FROM, byte[].class); if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); + throw new IOException("Unable to locate function: " + PARSE_FROM + " in type: " + type); } return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 69c33c833b0c..ca1a708e64f6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.client; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -25,7 +27,6 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; -import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Base64; @@ -34,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.KeyOnlyFilter; @@ -48,6 +48,8 @@ import org.junit.Test; import 
org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @@ -226,9 +228,9 @@ public void testDynamicFilter() throws Exception { ProtobufUtil.toGet(getProto2); fail("Should not be able to load the filter class"); } catch (IOException ioe) { - assertTrue(ioe.getCause() instanceof InvocationTargetException); - InvocationTargetException ite = (InvocationTargetException) ioe.getCause(); - assertTrue(ite.getTargetException() instanceof DeserializationException); + // This test is deserializing a FilterList, and one of the sub-filters is not found. + // So the actual caused by is buried a few levels deep. + assertThat(Throwables.getRootCause(ioe), instanceOf(ClassNotFoundException.class)); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.getDecoder().decode(MOCK_FILTER_JAR)); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectedFunctionCache.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectedFunctionCache.java new file mode 100644 index 000000000000..61b60861739a --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectedFunctionCache.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Cache to hold resolved Functions of a specific signature, generated through reflection. These can + * be (relatively) costly to create, but then are much faster than typical Method.invoke calls when + * executing. The cache is built-up on demand as calls are made to new classes. The functions are + * cached for the lifetime of the process. If a function cannot be created (security reasons, method + * not found, etc), a fallback function is cached which always returns null. Callers to + * {@link #getAndCallByName(String, Object)} should have handling for null return values. + *

+ * An instance is created for a specified baseClass (i.e. Filter), argClass (i.e. byte[]), and + * static methodName to call. These are used to resolve a Function which delegates to that static + * method, if it is found. + * @param the input argument type for the resolved functions + * @param the return type for the resolved functions + */ +@InterfaceAudience.Private +public final class ReflectedFunctionCache { + + private static final Logger LOG = LoggerFactory.getLogger(ReflectedFunctionCache.class); + + private final ConcurrentMap> lambdasByClass = + new ConcurrentHashMap<>(); + private final Class baseClass; + private final Class argClass; + private final String methodName; + private final ClassLoader classLoader; + + public ReflectedFunctionCache(Class baseClass, Class argClass, String staticMethodName) { + this.classLoader = getClass().getClassLoader(); + this.baseClass = baseClass; + this.argClass = argClass; + this.methodName = staticMethodName; + } + + /** + * Get and execute the Function for the given className, passing the argument to the function and + * returning the result. + * @param className the full name of the class to lookup + * @param argument the argument to pass to the function, if found. + * @return null if a function is not found for classname, otherwise the result of the function. + */ + @Nullable + public R getAndCallByName(String className, I argument) { + // todo: if we ever make java9+ our lowest supported jdk version, we can + // handle generating these for newly loaded classes from our DynamicClassLoader using + // MethodHandles.privateLookupIn(). For now this is not possible, because we can't easily + // create a privileged lookup in a non-default ClassLoader. So while this cache loads + // over time, it will never load a custom filter from "hbase.dynamic.jars.dir". + Function lambda = + ConcurrentMapUtils.computeIfAbsent(lambdasByClass, className, () -> loadFunction(className)); + + return lambda.apply(argument); + } + + private Function loadFunction(String className) { + long startTime = System.nanoTime(); + try { + Class clazz = Class.forName(className, false, classLoader); + if (!baseClass.isAssignableFrom(clazz)) { + LOG.debug("Requested class {} is not assignable to {}, skipping creation of function", + className, baseClass.getName()); + return this::notFound; + } + return ReflectionUtils.getOneArgStaticMethodAsFunction(clazz, methodName, argClass, + (Class) clazz); + } catch (Throwable t) { + LOG.debug("Failed to create function for {}", className, t); + return this::notFound; + } finally { + LOG.debug("Populated cache for {} in {}ms", className, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); + } + } + + /** + * In order to use computeIfAbsent, we can't store nulls in our cache. So we store a lambda which + * resolves to null. The contract is that getAndCallByName returns null in this case. 
+ */ + private R notFound(I argument) { + return null; + } + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java index 2d893e50c938..304358e33022 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java @@ -21,6 +21,11 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.lang.invoke.CallSite; +import java.lang.invoke.LambdaMetafactory; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; @@ -29,6 +34,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.charset.Charset; +import java.util.function.Function; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -208,6 +214,30 @@ private static String getTaskName(long id, String name) { return id + " (" + name + ")"; } + /** + * Creates a Function which can be called to performantly execute a reflected static method. The + * creation of the Function itself may not be fast, but executing that method thereafter should be + * much faster than {@link #invokeMethod(Object, String, Object...)}. + * @param lookupClazz the class to find the static method in + * @param methodName the method name + * @param argumentClazz the type of the argument + * @param returnValueClass the type of the return value + * @return a function which when called executes the requested static method. + * @throws Throwable exception types from the underlying reflection + */ + public static Function getOneArgStaticMethodAsFunction(Class lookupClazz, + String methodName, Class argumentClazz, Class returnValueClass) throws Throwable { + MethodHandles.Lookup lookup = MethodHandles.lookup(); + MethodHandle methodHandle = lookup.findStatic(lookupClazz, methodName, + MethodType.methodType(returnValueClass, argumentClazz)); + CallSite site = + LambdaMetafactory.metafactory(lookup, "apply", MethodType.methodType(Function.class), + methodHandle.type().generic(), methodHandle, methodHandle.type()); + + return (Function) site.getTarget().invokeExact(); + + } + /** * Get and invoke the target method from the given object with given parameters * @param obj the object to get and invoke method from diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java index da11879b9b9d..2bfce9908776 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -202,4 +203,20 @@ public static void addJarFilesToJar(File targetJar, String libPrefix, File... 
sr public static String localDirPath(Configuration conf) { return conf.get(ClassLoaderBase.LOCAL_DIR_KEY) + File.separator + "jars" + File.separator; } + + public static void deleteClass(String className, String testDir, Configuration conf) + throws Exception { + String jarFileName = className + ".jar"; + File file = new File(testDir, jarFileName); + file.delete(); + assertFalse("Should be deleted: " + file.getPath(), file.exists()); + + file = new File(conf.get("hbase.dynamic.jars.dir"), jarFileName); + file.delete(); + assertFalse("Should be deleted: " + file.getPath(), file.exists()); + + file = new File(ClassLoaderTestHelper.localDirPath(conf), jarFileName); + file.delete(); + assertFalse("Should be deleted: " + file.getPath(), file.exists()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java index 74fc54662c92..b99538e33cbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java @@ -19,19 +19,35 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.io.IOException; import java.math.BigDecimal; +import java.nio.charset.Charset; +import java.util.Collections; import java.util.regex.Pattern; +import org.apache.commons.io.IOUtils; +import org.apache.commons.text.StringSubstitutor; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; +import org.junit.AfterClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; +@RunWith(Parameterized.class) @Category({ FilterTests.class, SmallTests.class }) public class TestComparatorSerialization { @@ -39,6 +55,20 @@ public class TestComparatorSerialization { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestComparatorSerialization.class); + @Parameterized.Parameter(0) + public boolean allowFastReflectionFallthrough; + + @Parameterized.Parameters(name = "{index}: allowFastReflectionFallthrough={0}") + public static Iterable data() { + return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; + } + + @AfterClass + public static void afterClass() throws Exception { + // set back to true so that it doesn't affect any other tests + ProtobufUtil.setAllowFastReflectionFallthrough(true); + } + @Test public void testBinaryComparator() throws Exception { BinaryComparator binaryComparator = new BinaryComparator(Bytes.toBytes("binaryComparator")); @@ -99,4 +129,55 @@ public void testBigDecimalComparator() throws Exception { ProtobufUtil.toComparator(ProtobufUtil.toComparator(bigDecimalComparator)))); } + /** + * Test that we can load and deserialize custom comparators. 
Good to have generally, but also + * proves that this still works after HBASE-27276 despite not going through our fast function + * caches. + */ + @Test + public void testCustomComparator() throws Exception { + ByteArrayComparable baseFilter = new BinaryComparator("foo".getBytes()); + ComparatorProtos.Comparator proto = ProtobufUtil.toComparator(baseFilter); + String suffix = "" + System.currentTimeMillis() + allowFastReflectionFallthrough; + String className = "CustomLoadedComparator" + suffix; + proto = proto.toBuilder().setName(className).build(); + + Configuration conf = HBaseConfiguration.create(); + HBaseTestingUtil testUtil = new HBaseTestingUtil(); + String dataTestDir = testUtil.getDataTestDir().toString(); + + // First make sure the test bed is clean, delete any pre-existing class. + // Below toComparator call is expected to fail because the comparator is not loaded now + ClassLoaderTestHelper.deleteClass(className, dataTestDir, conf); + try { + ProtobufUtil.toComparator(proto); + fail("expected to fail"); + } catch (IOException e) { + // do nothing, this is expected + } + + // Write a jar to be loaded into the classloader + String code = StringSubstitutor.replace( + IOUtils.toString(getClass().getResourceAsStream("/CustomLoadedComparator.java.template"), + Charset.defaultCharset()), + Collections.singletonMap("suffix", suffix)); + ClassLoaderTestHelper.buildJar(dataTestDir, className, code, + ClassLoaderTestHelper.localDirPath(conf)); + + // Disallow fallthrough at first. We expect below to fail because the custom comparator is not + // available at initialization so not in the cache. + ProtobufUtil.setAllowFastReflectionFallthrough(false); + try { + ProtobufUtil.toComparator(proto); + fail("expected to fail"); + } catch (IOException e) { + // do nothing, this is expected + } + + // Now the deserialization should pass with fallthrough enabled. This proves that custom + // comparators can work despite not being supported by cache. 
+ ProtobufUtil.setAllowFastReflectionFallthrough(true); + ProtobufUtil.toComparator(proto); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java index d58052811fe8..e3a13e5ec7ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java @@ -18,24 +18,40 @@ package org.apache.hadoop.hbase.filter; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; +import org.apache.commons.io.IOUtils; +import org.apache.commons.text.StringSubstitutor; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; +import org.junit.AfterClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +@RunWith(Parameterized.class) @Category({ FilterTests.class, MediumTests.class }) public class TestFilterSerialization { @@ -43,6 +59,20 @@ public class TestFilterSerialization { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFilterSerialization.class); + @Parameterized.Parameter(0) + public boolean allowFastReflectionFallthrough; + + @Parameterized.Parameters(name = "{index}: allowFastReflectionFallthrough={0}") + public static Iterable data() { + return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; + } + + @AfterClass + public static void afterClass() throws Exception { + // set back to true so that it doesn't affect any other tests + ProtobufUtil.setAllowFastReflectionFallthrough(true); + } + @Test public void testColumnCountGetFilter() throws Exception { ColumnCountGetFilter columnCountGetFilter = new ColumnCountGetFilter(1); @@ -322,4 +352,55 @@ public void testColumnValueFilter() throws Exception { assertTrue(columnValueFilter .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnValueFilter)))); } + + /** + * Test that we can load and deserialize custom filters. Good to have generally, but also proves + * that this still works after HBASE-27276 despite not going through our fast function caches. 
+ */ + @Test + public void testCustomFilter() throws Exception { + Filter baseFilter = new PrefixFilter("foo".getBytes()); + FilterProtos.Filter filterProto = ProtobufUtil.toFilter(baseFilter); + String suffix = "" + System.currentTimeMillis() + allowFastReflectionFallthrough; + String className = "CustomLoadedFilter" + suffix; + filterProto = filterProto.toBuilder().setName(className).build(); + + Configuration conf = HBaseConfiguration.create(); + HBaseTestingUtil testUtil = new HBaseTestingUtil(); + String dataTestDir = testUtil.getDataTestDir().toString(); + + // First make sure the test bed is clean, delete any pre-existing class. + // Below toComparator call is expected to fail because the comparator is not loaded now + ClassLoaderTestHelper.deleteClass(className, dataTestDir, conf); + try { + Filter filter = ProtobufUtil.toFilter(filterProto); + fail("expected to fail"); + } catch (DoNotRetryIOException e) { + // do nothing, this is expected + } + + // Write a jar to be loaded into the classloader + String code = StringSubstitutor + .replace(IOUtils.toString(getClass().getResourceAsStream("/CustomLoadedFilter.java.template"), + Charset.defaultCharset()), Collections.singletonMap("suffix", suffix)); + ClassLoaderTestHelper.buildJar(dataTestDir, className, code, + ClassLoaderTestHelper.localDirPath(conf)); + + // Disallow fallthrough at first. We expect below to fail because the custom filter is not + // available at initialization so not in the cache. + ProtobufUtil.setAllowFastReflectionFallthrough(false); + try { + ProtobufUtil.toFilter(filterProto); + fail("expected to fail"); + } catch (DoNotRetryIOException e) { + // do nothing, this is expected + } + + // Now the deserialization should pass with fallthrough enabled. This proves that custom + // filters can work despite not being supported by cache. + ProtobufUtil.setAllowFastReflectionFallthrough(true); + ProtobufUtil.toFilter(filterProto); + + } + } diff --git a/hbase-server/src/test/resources/CustomLoadedComparator.java.template b/hbase-server/src/test/resources/CustomLoadedComparator.java.template new file mode 100644 index 000000000000..38572f62db45 --- /dev/null +++ b/hbase-server/src/test/resources/CustomLoadedComparator.java.template @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.TestFilterSerialization; + +/** + * Just wraps around a delegate, the only goal here is to create a Comparable which doesn't exist + * in org.apache.hadoop.hbase.filter so it doesn't get automatically loaded at startup. 
We can + * pass it into the DynamicClassLoader to prove that (de)serialization works. + */ +public class CustomLoadedComparator${suffix} extends ByteArrayComparable { + + private final BinaryComparator delegate; + + public CustomLoadedComparator${suffix}(BinaryComparator delegate) { + super(delegate.getValue()); + this.delegate = delegate; + } + + @Override + public byte[] toByteArray() { + return delegate.toByteArray(); + } + + public static CustomLoadedComparator${suffix} parseFrom(final byte[] pbBytes) throws + DeserializationException { + return new CustomLoadedComparator${suffix}(BinaryComparator.parseFrom(pbBytes)); + } + + @Override public int compareTo(byte[] value, int offset, int length) { + return delegate.compareTo(value, offset, length); + } + + @Override public byte[] getValue() { + return delegate.getValue(); + } + + @Override public int compareTo(byte[] value) { + return delegate.compareTo(value); + } + + @Override public int hashCode() { + return delegate.hashCode(); + } + + @Override public boolean equals(Object obj) { + return super.equals(obj); + } +} diff --git a/hbase-server/src/test/resources/CustomLoadedFilter.java.template b/hbase-server/src/test/resources/CustomLoadedFilter.java.template new file mode 100644 index 000000000000..84ef99feb9f1 --- /dev/null +++ b/hbase-server/src/test/resources/CustomLoadedFilter.java.template @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import java.util.Objects; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.FilterBase; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +/** + * Just wraps around a delegate, the only goal here is to create a filter which doesn't exist + * in org.apache.hadoop.hbase.filter so it doesn't get automatically loaded at startup. We can + * pass it into the DynamicClassLoader to prove that (de)serialization works. 
+ */ +public class CustomLoadedFilter${suffix} extends FilterBase { + + private final PrefixFilter delegate; + + public CustomLoadedFilter${suffix}(PrefixFilter delegate) { + this.delegate = delegate; + } + + @Override + public byte[] toByteArray() { + FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder(); + if (this.delegate.getPrefix() != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.delegate.getPrefix())); + return builder.build().toByteArray(); + } + + public static CustomLoadedFilter${suffix} parseFrom(final byte[] pbBytes) throws + DeserializationException { + FilterProtos.PrefixFilter proto; + try { + proto = FilterProtos.PrefixFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new CustomLoadedFilter${suffix}(new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CustomLoadedFilter${suffix} that = (CustomLoadedFilter${suffix}) o; + return Objects.equals(delegate, that.delegate); + } + + @Override + public int hashCode() { + return Objects.hash(delegate); + } +} From e806350bd0956e5d981e49afba57f6591093ec6c Mon Sep 17 00:00:00 2001 From: Vinod Anandan Date: Fri, 10 Nov 2023 16:08:53 +0200 Subject: [PATCH 137/514] HBASE-28193 Update plugin for SBOM generation to 2.7.10 (#5485) Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 28f57084c97a..2b1da88e056f 100644 --- a/pom.xml +++ b/pom.xml @@ -3500,7 +3500,7 @@ org.cyclonedx cyclonedx-maven-plugin - 2.7.6 + 2.7.10 From 7a660c8ab7985a293a452a2fc0f9c53400cb2bb6 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 13 Nov 2023 10:23:09 +0100 Subject: [PATCH 138/514] HBASE-28198 Fix broken link to replication documentation --- src/site/asciidoc/replication.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/site/asciidoc/replication.adoc b/src/site/asciidoc/replication.adoc index 908975434a1c..a413f8f7f8f6 100644 --- a/src/site/asciidoc/replication.adoc +++ b/src/site/asciidoc/replication.adoc @@ -19,4 +19,4 @@ under the License. = Apache HBase (TM) Replication -This information has been moved to link:book.html#cluster_replication"[the Cluster Replication] section of the link:book.html[Apache HBase Reference Guide]. +This information has been moved to link:book.html#_cluster_replication"[the Cluster Replication] section of the link:book.html[Apache HBase Reference Guide]. 
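The custom-filter test added in the TestFilterSerialization patch above round-trips a filter through its protobuf form with ProtobufUtil. Below is a minimal sketch of that flow, using only APIs that appear in the patch (PrefixFilter, FilterProtos.Filter, ProtobufUtil.toFilter and setAllowFastReflectionFallthrough); the standalone class and main() wrapper are illustrative only and not part of HBase.

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Filter original = new PrefixFilter(Bytes.toBytes("foo"));

    // Serialize to the protobuf form that travels in Scan/Get requests.
    FilterProtos.Filter proto = ProtobufUtil.toFilter(original);

    // With fallthrough enabled (the default the test restores in afterClass()),
    // deserialization may fall back to plain reflection for filter classes that
    // are not in the fast function cache, such as classes resolved through the
    // DynamicClassLoader.
    ProtobufUtil.setAllowFastReflectionFallthrough(true);
    Filter restored = ProtobufUtil.toFilter(proto);

    System.out.println(restored.getClass().getName()); // PrefixFilter
  }
}

Swapping the name in the proto for a class that only exists in a jar visible to the DynamicClassLoader (as testCustomFilter sets up with ClassLoaderTestHelper) exercises the same path, except the class is resolved dynamically rather than from the preloaded cache.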
From 23c41560d58cc1353b8a466deacd02dfee9e6743 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Mon, 13 Nov 2023 09:35:12 -0500 Subject: [PATCH 139/514] HBASE-24687: Use existing HMaster Connection in MobFileCleanerChore (#5509) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang --- .../hbase/IntegrationTestMobCompaction.java | 14 +- .../hadoop/hbase/mob/MobFileCleanerChore.java | 223 +--------------- .../hadoop/hbase/mob/MobFileCleanupUtil.java | 250 ++++++++++++++++++ .../hadoop/hbase/mob/MobStressToolRunner.java | 9 +- .../mob/TestMobCompactionWithDefaults.java | 3 - ...Chore.java => TestMobFileCleanupUtil.java} | 14 +- 6 files changed, 269 insertions(+), 244 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/mob/{TestMobFileCleanerChore.java => TestMobFileCleanupUtil.java} (95%) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMobCompaction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMobCompaction.java index f54d815ad4db..8e1952a696cd 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMobCompaction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMobCompaction.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.mob.FaultyMobStoreCompactor; import org.apache.hadoop.hbase.mob.MobConstants; -import org.apache.hadoop.hbase.mob.MobFileCleanerChore; +import org.apache.hadoop.hbase.mob.MobFileCleanupUtil; import org.apache.hadoop.hbase.mob.MobStoreEngine; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.IntegrationTests; @@ -100,7 +100,6 @@ public class IntegrationTestMobCompaction extends IntegrationTestBase { private static ColumnFamilyDescriptor familyDescriptor; private static Admin admin; private static Table table = null; - private static MobFileCleanerChore chore; private static volatile boolean run = true; @@ -249,12 +248,9 @@ static class CleanMobAndArchive implements Runnable { public void run() { while (run) { try { - LOG.info("MOB cleanup chore started ..."); - if (chore == null) { - chore = new MobFileCleanerChore(); - } - chore.cleanupObsoleteMobFiles(conf, table.getName()); - LOG.info("MOB cleanup chore finished"); + LOG.info("MOB cleanup started ..."); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); + LOG.info("MOB cleanup finished"); Thread.sleep(130000); } catch (Exception e) { @@ -329,7 +325,7 @@ public void testMobCompaction() throws InterruptedException, IOException { LOG.info("Waiting for write thread to finish ..."); writeData.join(); // Cleanup again - chore.cleanupObsoleteMobFiles(conf, table.getName()); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); if (util != null) { LOG.info("Archive cleaner started ..."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java index 2c78c6f5ac74..fda9f1292eb6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java @@ -17,42 +17,20 @@ */ package org.apache.hadoop.hbase.mob; -import java.io.FileNotFoundException; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; 
-import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStoreFile; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.collect.SetMultimap; - /** * The class MobFileCleanerChore for running cleaner regularly to remove the expired and obsolete * (files which have no active references to) mob files. @@ -94,10 +72,6 @@ private void checkObsoleteConfigurations() { } } - public MobFileCleanerChore() { - this.master = null; - } - @Override protected void chore() { TableDescriptors htds = master.getTableDescriptors(); @@ -123,7 +97,10 @@ protected void chore() { try { // Now clean obsolete files for a table LOG.info("Cleaning obsolete MOB files from table={}", htd.getTableName()); - cleanupObsoleteMobFiles(master.getConfiguration(), htd.getTableName()); + try (final Admin admin = master.getConnection().getAdmin()) { + MobFileCleanupUtil.cleanupObsoleteMobFiles(master.getConfiguration(), htd.getTableName(), + admin); + } LOG.info("Cleaning obsolete MOB files finished for table={}", htd.getTableName()); } catch (IOException e) { LOG.error("Failed to clean the obsolete mob files for table={}", htd.getTableName(), e); @@ -131,196 +108,4 @@ protected void chore() { } } - /** - * Performs housekeeping file cleaning (called by MOB Cleaner chore) - * @param conf configuration - * @param table table name - * @throws IOException exception - */ - public void cleanupObsoleteMobFiles(Configuration conf, TableName table) throws IOException { - - long minAgeToArchive = - conf.getLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, MobConstants.DEFAULT_MIN_AGE_TO_ARCHIVE); - // We check only those MOB files, which creation time is less - // than maxCreationTimeToArchive. This is a current time - 1h. 1 hour gap - // gives us full confidence that all corresponding store files will - // exist at the time cleaning procedure begins and will be examined. - // So, if MOB file creation time is greater than this maxTimeToArchive, - // this will be skipped and won't be archived. 
- long maxCreationTimeToArchive = EnvironmentEdgeManager.currentTime() - minAgeToArchive; - try (final Connection conn = ConnectionFactory.createConnection(conf); - final Admin admin = conn.getAdmin();) { - TableDescriptor htd = admin.getDescriptor(table); - List list = MobUtils.getMobColumnFamilies(htd); - if (list.size() == 0) { - LOG.info("Skipping non-MOB table [{}]", table); - return; - } else { - LOG.info("Only MOB files whose creation time older than {} will be archived, table={}", - maxCreationTimeToArchive, table); - } - - FileSystem fs = FileSystem.get(conf); - Set regionNames = new HashSet<>(); - Path rootDir = CommonFSUtils.getRootDir(conf); - Path tableDir = CommonFSUtils.getTableDir(rootDir, table); - List regionDirs = FSUtils.getRegionDirs(fs, tableDir); - - Set allActiveMobFileName = new HashSet(); - for (Path regionPath : regionDirs) { - regionNames.add(regionPath.getName()); - for (ColumnFamilyDescriptor hcd : list) { - String family = hcd.getNameAsString(); - Path storePath = new Path(regionPath, family); - boolean succeed = false; - Set regionMobs = new HashSet(); - - while (!succeed) { - if (!fs.exists(storePath)) { - String errMsg = String.format("Directory %s was deleted during MOB file cleaner chore" - + " execution, aborting MOB file cleaner chore.", storePath); - throw new IOException(errMsg); - } - RemoteIterator rit = fs.listLocatedStatus(storePath); - List storeFiles = new ArrayList(); - // Load list of store files first - while (rit.hasNext()) { - Path p = rit.next().getPath(); - if (fs.isFile(p)) { - storeFiles.add(p); - } - } - LOG.info("Found {} store files in: {}", storeFiles.size(), storePath); - Path currentPath = null; - try { - for (Path pp : storeFiles) { - currentPath = pp; - LOG.trace("Store file: {}", pp); - HStoreFile sf = null; - byte[] mobRefData = null; - byte[] bulkloadMarkerData = null; - try { - sf = new HStoreFile(fs, pp, conf, CacheConfig.DISABLED, BloomType.NONE, true); - sf.initReader(); - mobRefData = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS); - bulkloadMarkerData = sf.getMetadataValue(HStoreFile.BULKLOAD_TASK_KEY); - // close store file to avoid memory leaks - sf.closeStoreFile(true); - } catch (IOException ex) { - // When FileBased SFT is active the store dir can contain corrupted or incomplete - // files. So read errors are expected. We just skip these files. - if (ex instanceof FileNotFoundException) { - throw ex; - } - LOG.debug("Failed to get mob data from file: {} due to error.", pp.toString(), - ex); - continue; - } - if (mobRefData == null) { - if (bulkloadMarkerData == null) { - LOG.warn("Found old store file with no MOB_FILE_REFS: {} - " - + "can not proceed until all old files will be MOB-compacted.", pp); - return; - } else { - LOG.debug("Skipping file without MOB references (bulkloaded file):{}", pp); - continue; - } - } - // file may or may not have MOB references, but was created by the distributed - // mob compaction code. 
- try { - SetMultimap mobs = - MobUtils.deserializeMobFileRefs(mobRefData).build(); - LOG.debug("Found {} mob references for store={}", mobs.size(), sf); - LOG.trace("Specific mob references found for store={} : {}", sf, mobs); - regionMobs.addAll(mobs.values()); - } catch (RuntimeException exception) { - throw new IOException("failure getting mob references for hfile " + sf, - exception); - } - } - } catch (FileNotFoundException e) { - LOG.warn( - "Missing file:{} Starting MOB cleaning cycle from the beginning" + " due to error", - currentPath, e); - regionMobs.clear(); - continue; - } - succeed = true; - } - - // Add MOB references for current region/family - allActiveMobFileName.addAll(regionMobs); - } // END column families - } // END regions - // Check if number of MOB files too big (over 1M) - if (allActiveMobFileName.size() > 1000000) { - LOG.warn("Found too many active MOB files: {}, table={}, " - + "this may result in high memory pressure.", allActiveMobFileName.size(), table); - } - LOG.debug("Found: {} active mob refs for table={}", allActiveMobFileName.size(), table); - allActiveMobFileName.stream().forEach(LOG::trace); - - // Now scan MOB directories and find MOB files with no references to them - for (ColumnFamilyDescriptor hcd : list) { - List toArchive = new ArrayList(); - String family = hcd.getNameAsString(); - Path dir = MobUtils.getMobFamilyPath(conf, table, family); - RemoteIterator rit = fs.listLocatedStatus(dir); - while (rit.hasNext()) { - LocatedFileStatus lfs = rit.next(); - Path p = lfs.getPath(); - String[] mobParts = p.getName().split("_"); - String regionName = mobParts[mobParts.length - 1]; - - if (!regionNames.contains(regionName)) { - // MOB belonged to a region no longer hosted - long creationTime = fs.getFileStatus(p).getModificationTime(); - if (creationTime < maxCreationTimeToArchive) { - LOG.trace("Archiving MOB file {} creation time={}", p, - (fs.getFileStatus(p).getModificationTime())); - toArchive.add(p); - } else { - LOG.trace("Skipping fresh file: {}. Creation time={}", p, - fs.getFileStatus(p).getModificationTime()); - } - } else { - LOG.trace("Keeping MOB file with existing region: {}", p); - } - } - LOG.info(" MOB Cleaner found {} files to archive for table={} family={}", toArchive.size(), - table, family); - archiveMobFiles(conf, table, family.getBytes(), toArchive); - LOG.info(" MOB Cleaner archived {} files, table={} family={}", toArchive.size(), table, - family); - } - } - } - - /** - * Archives the mob files. - * @param conf The current configuration. - * @param tableName The table name. - * @param family The name of the column family. - * @param storeFiles The files to be archived. 
- * @throws IOException exception - */ - public void archiveMobFiles(Configuration conf, TableName tableName, byte[] family, - List storeFiles) throws IOException { - - if (storeFiles.size() == 0) { - // nothing to remove - LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName, - Bytes.toString(family)); - return; - } - Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName); - FileSystem fs = storeFiles.get(0).getFileSystem(conf); - - for (Path p : storeFiles) { - LOG.debug("MOB Cleaner is archiving: {}", p); - HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir, - family, p); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java new file mode 100644 index 000000000000..049192624ef3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mob; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.SetMultimap; + +@InterfaceAudience.Private +public final class MobFileCleanupUtil { + + private static final Logger LOG = LoggerFactory.getLogger(MobFileCleanupUtil.class); + + private MobFileCleanupUtil() { + } + + /** + * Performs housekeeping file cleaning (called by MOB Cleaner chore) + * @param conf configuration + * @param table table name + * @throws IOException exception + */ + public static void cleanupObsoleteMobFiles(Configuration conf, TableName table, Admin admin) + throws IOException { + long minAgeToArchive = + conf.getLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, MobConstants.DEFAULT_MIN_AGE_TO_ARCHIVE); + // We check only those MOB files, which creation time is less + // than maxCreationTimeToArchive. This is a current time - 1h. 1 hour gap + // gives us full confidence that all corresponding store files will + // exist at the time cleaning procedure begins and will be examined. + // So, if MOB file creation time is greater than this maxTimeToArchive, + // this will be skipped and won't be archived. 
+ long maxCreationTimeToArchive = EnvironmentEdgeManager.currentTime() - minAgeToArchive; + TableDescriptor htd = admin.getDescriptor(table); + List list = MobUtils.getMobColumnFamilies(htd); + if (list.size() == 0) { + LOG.info("Skipping non-MOB table [{}]", table); + return; + } else { + LOG.info("Only MOB files whose creation time older than {} will be archived, table={}", + maxCreationTimeToArchive, table); + } + + FileSystem fs = FileSystem.get(conf); + Set regionNames = new HashSet<>(); + Path rootDir = CommonFSUtils.getRootDir(conf); + Path tableDir = CommonFSUtils.getTableDir(rootDir, table); + List regionDirs = FSUtils.getRegionDirs(fs, tableDir); + + Set allActiveMobFileName = new HashSet(); + for (Path regionPath : regionDirs) { + regionNames.add(regionPath.getName()); + for (ColumnFamilyDescriptor hcd : list) { + String family = hcd.getNameAsString(); + Path storePath = new Path(regionPath, family); + boolean succeed = false; + Set regionMobs = new HashSet(); + + while (!succeed) { + if (!fs.exists(storePath)) { + String errMsg = String.format("Directory %s was deleted during MOB file cleaner chore" + + " execution, aborting MOB file cleaner chore.", storePath); + throw new IOException(errMsg); + } + RemoteIterator rit = fs.listLocatedStatus(storePath); + List storeFiles = new ArrayList(); + // Load list of store files first + while (rit.hasNext()) { + Path p = rit.next().getPath(); + if (fs.isFile(p)) { + storeFiles.add(p); + } + } + LOG.info("Found {} store files in: {}", storeFiles.size(), storePath); + Path currentPath = null; + try { + for (Path pp : storeFiles) { + currentPath = pp; + LOG.trace("Store file: {}", pp); + HStoreFile sf = null; + byte[] mobRefData = null; + byte[] bulkloadMarkerData = null; + try { + sf = new HStoreFile(fs, pp, conf, CacheConfig.DISABLED, BloomType.NONE, true); + sf.initReader(); + mobRefData = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS); + bulkloadMarkerData = sf.getMetadataValue(HStoreFile.BULKLOAD_TASK_KEY); + // close store file to avoid memory leaks + sf.closeStoreFile(true); + } catch (IOException ex) { + // When FileBased SFT is active the store dir can contain corrupted or incomplete + // files. So read errors are expected. We just skip these files. + if (ex instanceof FileNotFoundException) { + throw ex; + } + LOG.debug("Failed to get mob data from file: {} due to error.", pp.toString(), ex); + continue; + } + if (mobRefData == null) { + if (bulkloadMarkerData == null) { + LOG.warn("Found old store file with no MOB_FILE_REFS: {} - " + + "can not proceed until all old files will be MOB-compacted.", pp); + return; + } else { + LOG.debug("Skipping file without MOB references (bulkloaded file):{}", pp); + continue; + } + } + // file may or may not have MOB references, but was created by the distributed + // mob compaction code. 
+ try { + SetMultimap mobs = + MobUtils.deserializeMobFileRefs(mobRefData).build(); + LOG.debug("Found {} mob references for store={}", mobs.size(), sf); + LOG.trace("Specific mob references found for store={} : {}", sf, mobs); + regionMobs.addAll(mobs.values()); + } catch (RuntimeException exception) { + throw new IOException("failure getting mob references for hfile " + sf, exception); + } + } + } catch (FileNotFoundException e) { + LOG.warn( + "Missing file:{} Starting MOB cleaning cycle from the beginning" + " due to error", + currentPath, e); + regionMobs.clear(); + continue; + } + succeed = true; + } + + // Add MOB references for current region/family + allActiveMobFileName.addAll(regionMobs); + } // END column families + } // END regions + // Check if number of MOB files too big (over 1M) + if (allActiveMobFileName.size() > 1000000) { + LOG.warn("Found too many active MOB files: {}, table={}, " + + "this may result in high memory pressure.", allActiveMobFileName.size(), table); + } + LOG.debug("Found: {} active mob refs for table={}", allActiveMobFileName.size(), table); + allActiveMobFileName.stream().forEach(LOG::trace); + + // Now scan MOB directories and find MOB files with no references to them + for (ColumnFamilyDescriptor hcd : list) { + checkColumnFamilyDescriptor(conf, table, fs, admin, hcd, regionNames, + maxCreationTimeToArchive); + } + } + + private static void checkColumnFamilyDescriptor(Configuration conf, TableName table, + FileSystem fs, Admin admin, ColumnFamilyDescriptor hcd, Set regionNames, + long maxCreationTimeToArchive) throws IOException { + List toArchive = new ArrayList(); + String family = hcd.getNameAsString(); + Path dir = MobUtils.getMobFamilyPath(conf, table, family); + RemoteIterator rit = fs.listLocatedStatus(dir); + while (rit.hasNext()) { + LocatedFileStatus lfs = rit.next(); + Path p = lfs.getPath(); + String[] mobParts = p.getName().split("_"); + String regionName = mobParts[mobParts.length - 1]; + + if (!regionNames.contains(regionName)) { + // MOB belonged to a region no longer hosted + long creationTime = fs.getFileStatus(p).getModificationTime(); + if (creationTime < maxCreationTimeToArchive) { + LOG.trace("Archiving MOB file {} creation time={}", p, + (fs.getFileStatus(p).getModificationTime())); + toArchive.add(p); + } else { + LOG.trace("Skipping fresh file: {}. Creation time={}", p, + fs.getFileStatus(p).getModificationTime()); + } + } else { + LOG.trace("Keeping MOB file with existing region: {}", p); + } + } + LOG.info(" MOB Cleaner found {} files to archive for table={} family={}", toArchive.size(), + table, family); + archiveMobFiles(conf, table, admin, family.getBytes(), toArchive); + LOG.info(" MOB Cleaner archived {} files, table={} family={}", toArchive.size(), table, family); + } + + /** + * Archives the mob files. + * @param conf The current configuration. + * @param tableName The table name. + * @param family The name of the column family. + * @param storeFiles The files to be archived. 
+ * @throws IOException exception + */ + private static void archiveMobFiles(Configuration conf, TableName tableName, Admin admin, + byte[] family, List storeFiles) throws IOException { + + if (storeFiles.size() == 0) { + // nothing to remove + LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName, + Bytes.toString(family)); + return; + } + Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName); + FileSystem fs = storeFiles.get(0).getFileSystem(conf); + + for (Path p : storeFiles) { + LOG.debug("MOB Cleaner is archiving: {}", p); + HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir, + family, p); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java index b8d1750f8063..ec1a567591cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java @@ -70,7 +70,6 @@ public class MobStressToolRunner { private long count = 500000; private double failureProb = 0.1; private Table table = null; - private MobFileCleanerChore chore = new MobFileCleanerChore(); private static volatile boolean run = true; @@ -156,9 +155,9 @@ class CleanMobAndArchive implements Runnable { public void run() { while (run) { try { - LOG.info("MOB cleanup chore started ..."); - chore.cleanupObsoleteMobFiles(conf, table.getName()); - LOG.info("MOB cleanup chore finished"); + LOG.info("MOB cleanup started ..."); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); + LOG.info("MOB cleanup finished"); Thread.sleep(130000); } catch (Exception e) { @@ -227,7 +226,7 @@ public void runStressTest() throws InterruptedException, IOException { LOG.info("Waiting for write thread to finish ..."); writeData.join(); // Cleanup again - chore.cleanupObsoleteMobFiles(conf, table.getName()); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); getNumberOfMobFiles(conf, table.getName(), new String(fam)); if (HTU != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java index 69ba4ea24b29..3ad6585c4620 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java @@ -97,8 +97,6 @@ public class TestMobCompactionWithDefaults { protected int numRegions = 20; protected int rows = 1000; - protected MobFileCleanerChore cleanerChore; - protected Boolean useFileBasedSFT; public TestMobCompactionWithDefaults(Boolean useFileBasedSFT) { @@ -139,7 +137,6 @@ protected void additonalConfigSetup() { public void setUp() throws Exception { htuStart(); admin = HTU.getAdmin(); - cleanerChore = new MobFileCleanerChore(); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true) .setMobThreshold(mobLen).setMaxVersions(1).build(); tableDescriptor = HTU.createModifyableTableDescriptor(TestMobUtils.getTableName(test)) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanupUtil.java similarity index 95% rename from 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanupUtil.java index bdc3cce13e4c..fc9eceb62412 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanupUtil.java @@ -57,11 +57,11 @@ * cleaner chore 7 Verifies that number of MOB files in a mob directory is 1. */ @Category(MediumTests.class) -public class TestMobFileCleanerChore { - private static final Logger LOG = LoggerFactory.getLogger(TestMobFileCleanerChore.class); +public class TestMobFileCleanupUtil { + private static final Logger LOG = LoggerFactory.getLogger(TestMobFileCleanupUtil.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobFileCleanerChore.class); + HBaseClassTestRule.forClass(TestMobFileCleanupUtil.class); private HBaseTestingUtil HTU; @@ -77,10 +77,9 @@ public class TestMobFileCleanerChore { private ColumnFamilyDescriptor familyDescriptor; private Admin admin; private Table table = null; - private MobFileCleanerChore chore; private long minAgeToArchive = 10000; - public TestMobFileCleanerChore() { + public TestMobFileCleanupUtil() { } @Before @@ -92,7 +91,6 @@ public void setUp() throws Exception { HTU.startMiniCluster(); admin = HTU.getAdmin(); - chore = new MobFileCleanerChore(); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true) .setMobThreshold(mobLen).setMaxVersions(1).build(); tableDescriptor = HTU.createModifyableTableDescriptor("testMobCompactTable") @@ -168,7 +166,7 @@ public void testMobFileCleanerChore() throws InterruptedException, IOException { Thread.sleep(minAgeToArchive + 1000); LOG.info("Cleaning up MOB files"); // Cleanup - chore.cleanupObsoleteMobFiles(conf, table.getName()); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); // verify that nothing have happened num = getNumberOfMobFiles(conf, table.getName(), new String(fam)); @@ -187,7 +185,7 @@ public void testMobFileCleanerChore() throws InterruptedException, IOException { Thread.sleep(minAgeToArchive + 1000); LOG.info("Cleaning up MOB files"); - chore.cleanupObsoleteMobFiles(conf, table.getName()); + MobFileCleanupUtil.cleanupObsoleteMobFiles(conf, table.getName(), admin); // check that the extra file got deleted num = getNumberOfMobFiles(conf, table.getName(), new String(fam)); From ce9eabe61661599d0b424026841eaf0087d84805 Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Wed, 15 Nov 2023 13:34:05 -0800 Subject: [PATCH 140/514] HBASE-28204 Canary can take lot more time If region starts with delete markers (#5522) Co-authored-by: Mihir Monani --- .../apache/hadoop/hbase/tool/CanaryTool.java | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d5676263c820..d0cd199ecdc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -510,38 +510,38 @@ public Void call() { private Void readColumnFamily(Table table, ColumnFamilyDescriptor column) { byte[] startKey = null; - Get get = null; - Scan scan = null; + Scan scan = new Scan(); ResultScanner rs = null; StopWatch stopWatch = new StopWatch(); startKey = 
region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. if (startKey.length > 0) { - get = new Get(startKey); - get.setCacheBlocks(false); - get.setFilter(new FirstKeyOnlyFilter()); - get.addFamily(column.getName()); - } else { - scan = new Scan(); - LOG.debug("rawScan {} for {}", rawScanEnabled, region.getTable()); - scan.setRaw(rawScanEnabled); - scan.setCaching(1); - scan.setCacheBlocks(false); - scan.setFilter(new FirstKeyOnlyFilter()); - scan.addFamily(column.getName()); - scan.setMaxResultSize(1L); - scan.setOneRowLimit(); + // There are 4 types of region for any table. + // 1. Start and End key are empty. (Table with Single region) + // 2. Start key is empty. (First region of the table) + // 3. End key is empty. (Last region of the table) + // 4. Region with Start & End key. (All the regions between first & last region of the + // table.) + // + // Since Scan only takes Start and/or End Row and doesn't accept the region ID, + // we set the start row when Regions are of type 3 OR 4 as mentioned above. + // For type 1 and 2, We don't need to set this option. + scan.withStartRow(startKey); } + LOG.debug("rawScan {} for {}", rawScanEnabled, region.getTable()); + scan.setRaw(rawScanEnabled); + scan.setCaching(1); + scan.setCacheBlocks(false); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.addFamily(column.getName()); + scan.setMaxResultSize(1L); + scan.setOneRowLimit(); LOG.debug("Reading from {} {} {} {}", region.getTable(), region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(startKey)); try { stopWatch.start(); - if (startKey.length > 0) { - table.get(get); - } else { - rs = table.getScanner(scan); - rs.next(); - } + rs = table.getScanner(scan); + rs.next(); stopWatch.stop(); this.readWriteLatency.add(stopWatch.getTime()); sink.publishReadTiming(serverName, region, column, stopWatch.getTime()); From 364fcea01c45921b806edea80c8a01e08608dd8d Mon Sep 17 00:00:00 2001 From: Rahul Agarkar Date: Wed, 2 Aug 2023 23:06:30 +0530 Subject: [PATCH 141/514] =?UTF-8?q?HBASE-27997=20Enhance=20prefetch=20exec?= =?UTF-8?q?utor=20to=20record=20region=20prefetch=20infor=E2=80=A6=20(#533?= =?UTF-8?q?9)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Wellington Chevreuil Reviewew-by: Kota-SH --- .../main/protobuf/PrefetchPersistence.proto | 7 ++++- .../hbase/io/hfile/HFilePreadReader.java | 10 +++++++ .../hbase/io/hfile/PrefetchProtoUtils.java | 26 ++++++++++++++++--- 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto b/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto index d1a2b4cfd1b7..a024b94baa62 100644 --- a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto +++ b/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto @@ -27,5 +27,10 @@ option optimize_for = SPEED; message PrefetchedHfileName { - map prefetched_files = 1; + map prefetched_files = 1; +} + +message RegionFileSizeMap { + required string region_name = 1; + required uint64 region_prefetch_size = 2; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index f1579ea53b8e..2079dcafb65f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -132,6 +132,7 @@ public void run() { LOG.warn("Close prefetch stream reader failed, path: " + path, e); } } + String regionName = getRegionName(path); PrefetchExecutor.complete(path); } } @@ -139,6 +140,15 @@ public void run() { } } + /* + * Get the region name for the given file path. A HFile is always kept under the //. To find the region for a given hFile, just find the name of the grandparent + * directory. + */ + private static String getRegionName(Path path) { + return path.getParent().getParent().getName(); + } + private static String getPathOffsetEndStr(final Path path, final long offset, final long end) { return "path=" + path.toString() + ", offset=" + offset + ", end=" + end; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java index e75e8a6a6522..df67e4429a2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hbase.io.hfile; +import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.PersistentPrefetchProtos; @@ -26,8 +28,26 @@ private PrefetchProtoUtils() { } static PersistentPrefetchProtos.PrefetchedHfileName - toPB(Map prefetchedHfileNames) { - return PersistentPrefetchProtos.PrefetchedHfileName.newBuilder() - .putAllPrefetchedFiles(prefetchedHfileNames).build(); + toPB(Map> prefetchedHfileNames) { + Map tmpMap = new HashMap<>(); + prefetchedHfileNames.forEach((hFileName, regionPrefetchMap) -> { + PersistentPrefetchProtos.RegionFileSizeMap tmpRegionFileSize = + PersistentPrefetchProtos.RegionFileSizeMap.newBuilder() + .setRegionName(regionPrefetchMap.getFirst()) + .setRegionPrefetchSize(regionPrefetchMap.getSecond()).build(); + tmpMap.put(hFileName, tmpRegionFileSize); + }); + return PersistentPrefetchProtos.PrefetchedHfileName.newBuilder().putAllPrefetchedFiles(tmpMap) + .build(); + } + + static Map> + fromPB(Map prefetchHFileNames) { + Map> hFileMap = new HashMap<>(); + prefetchHFileNames.forEach((hFileName, regionPrefetchMap) -> { + hFileMap.put(hFileName, + new Pair<>(regionPrefetchMap.getRegionName(), regionPrefetchMap.getRegionPrefetchSize())); + }); + return hFileMap; } } From 9e74cc0d655badccdc300bc485b7ffb02b0606a8 Mon Sep 17 00:00:00 2001 From: Rahul Agarkar Date: Tue, 29 Aug 2023 15:36:23 +0530 Subject: [PATCH 142/514] =?UTF-8?q?HBASE-27998=20Enhance=20region=20metric?= =?UTF-8?q?s=20to=20include=20prefetch=20ratio=20for=20each=E2=80=A6=20(#5?= =?UTF-8?q?342)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Wellington Chevreuil --- .../apache/hadoop/hbase/RegionMetrics.java | 6 ++ .../hadoop/hbase/RegionMetricsBuilder.java | 38 +++++++- .../apache/hadoop/hbase/ServerMetrics.java | 6 ++ .../hadoop/hbase/ServerMetricsBuilder.java | 20 ++++- .../main/protobuf/PrefetchPersistence.proto | 36 -------- .../main/protobuf/server/ClusterStatus.proto | 11 +++ .../protobuf/server/io/BucketCacheEntry.proto | 8 +- .../hadoop/hbase/io/hfile/BlockCache.java | 3 +- .../hbase/io/hfile/CombinedBlockCache.java | 3 +- .../hbase/io/hfile/HFilePreadReader.java | 4 +- .../hbase/io/hfile/PrefetchProtoUtils.java | 53 ------------ 
.../hbase/io/hfile/bucket/BucketCache.java | 86 ++++++++++++++++--- .../io/hfile/bucket/BucketProtoUtils.java | 26 +++++- .../hbase/regionserver/HRegionServer.java | 40 ++++++++- .../hadoop/hbase/TestServerMetrics.java | 18 ++-- .../master/TestRegionsRecoveryChore.java | 14 +++ 16 files changed, 250 insertions(+), 122 deletions(-) delete mode 100644 hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 47b36a7a1516..b029d0288564 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -138,4 +138,10 @@ default String getNameAsString() { /** Returns the compaction state of this region */ CompactionState getCompactionState(); + + /** Returns the total size of the hfiles in the region */ + Size getRegionSizeMB(); + + /** Returns current prefetch ratio of this region on this server */ + float getCurrentRegionCachedRatio(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index 43b3a17aac17..d3361693079a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -80,7 +80,8 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio ClusterStatusProtos.StoreSequenceId::getSequenceId))) .setUncompressedStoreFileSize( new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE)) - .build(); + .setRegionSizeMB(new Size(regionLoadPB.getRegionSizeMB(), Size.Unit.MEGABYTE)) + .setCurrentRegionCachedRatio(regionLoadPB.getCurrentRegionCachedRatio()).build(); } private static List @@ -120,7 +121,8 @@ public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMe .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) .setStoreUncompressedSizeMB( (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) - .build(); + .setRegionSizeMB((int) regionMetrics.getRegionSizeMB().get(Size.Unit.MEGABYTE)) + .setCurrentRegionCachedRatio(regionMetrics.getCurrentRegionCachedRatio()).build(); } public static RegionMetricsBuilder newBuilder(byte[] name) { @@ -154,6 +156,8 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWithSsdWeight; private long blocksTotalWeight; private CompactionState compactionState; + private Size regionSizeMB = Size.ZERO; + private float currentRegionCachedRatio; private RegionMetricsBuilder(byte[] name) { this.name = name; @@ -289,6 +293,16 @@ public RegionMetricsBuilder setCompactionState(CompactionState compactionState) return this; } + public RegionMetricsBuilder setRegionSizeMB(Size value) { + this.regionSizeMB = value; + return this; + } + + public RegionMetricsBuilder setCurrentRegionCachedRatio(float value) { + this.currentRegionCachedRatio = value; + return this; + } + public RegionMetrics build() { return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount, maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize, @@ -296,7 +310,7 @@ public RegionMetrics build() { uncompressedStoreFileSize, 
writeRequestCount, readRequestCount, cpRequestCount, filteredReadRequestCount, completedSequenceId, storeSequenceIds, dataLocality, lastMajorCompactionTimestamp, dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, - blocksTotalWeight, compactionState); + blocksTotalWeight, compactionState, regionSizeMB, currentRegionCachedRatio); } private static class RegionMetricsImpl implements RegionMetrics { @@ -327,6 +341,8 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; private final CompactionState compactionState; + private final Size regionSizeMB; + private final float currentRegionCachedRatio; RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount, int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount, @@ -336,7 +352,7 @@ private static class RegionMetricsImpl implements RegionMetrics { long filteredReadRequestCount, long completedSequenceId, Map storeSequenceIds, float dataLocality, long lastMajorCompactionTimestamp, float dataLocalityForSsd, long blocksLocalWeight, long blocksLocalWithSsdWeight, long blocksTotalWeight, - CompactionState compactionState) { + CompactionState compactionState, Size regionSizeMB, float currentRegionCachedRatio) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -364,6 +380,8 @@ private static class RegionMetricsImpl implements RegionMetrics { this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight; this.blocksTotalWeight = blocksTotalWeight; this.compactionState = compactionState; + this.regionSizeMB = regionSizeMB; + this.currentRegionCachedRatio = currentRegionCachedRatio; } @Override @@ -501,6 +519,16 @@ public CompactionState getCompactionState() { return compactionState; } + @Override + public Size getRegionSizeMB() { + return regionSizeMB; + } + + @Override + public float getCurrentRegionCachedRatio() { + return currentRegionCachedRatio; + } + @Override public String toString() { StringBuilder sb = @@ -541,6 +569,8 @@ public String toString() { Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight); Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); Strings.appendKeyValue(sb, "compactionState", compactionState); + Strings.appendKeyValue(sb, "regionSizeMB", regionSizeMB); + Strings.appendKeyValue(sb, "currentRegionCachedRatio", currentRegionCachedRatio); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index 2684886ba3d5..2cf55a1abdc0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -106,4 +106,10 @@ default String getVersion() { @Nullable List getTasks(); + /** + * Returns the region cache information for the regions hosted on this server + * @return map of region encoded name and the size of the region cached on this region server + * rounded to MB + */ + Map getRegionCachedInfo(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 7a0312f22fdc..c7aea21e845a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -85,6 +85,7 @@ public static ServerMetrics toServerMetrics(ServerName serverName, int versionNu : null) .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask) .collect(Collectors.toList())) + .setRegionCachedInfo(serverLoadPB.getRegionCachedInfoMap()) .setReportTimestamp(serverLoadPB.getReportEndTime()) .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) .setVersion(version).build(); @@ -111,6 +112,7 @@ public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) .addAllTasks( metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList())) + .putAllRegionCachedInfo(metrics.getRegionCachedInfo()) .setReportStartTime(metrics.getLastReportTimestamp()) .setReportEndTime(metrics.getReportTimestamp()); if (metrics.getReplicationLoadSink() != null) { @@ -142,6 +144,7 @@ public static ServerMetricsBuilder newBuilder(ServerName sn) { private long reportTimestamp = EnvironmentEdgeManager.currentTime(); private long lastReportTimestamp = 0; private final List tasks = new ArrayList<>(); + private Map regionCachedInfo = new HashMap<>(); private ServerMetricsBuilder(ServerName serverName) { this.serverName = serverName; @@ -232,11 +235,16 @@ public ServerMetricsBuilder setTasks(List tasks) { return this; } + public ServerMetricsBuilder setRegionCachedInfo(Map value) { + this.regionCachedInfo = value; + return this; + } + public ServerMetrics build() { return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond, requestCount, readRequestCount, writeRequestCount, usedHeapSize, maxHeapSize, infoServerPort, sources, sink, regionStatus, coprocessorNames, reportTimestamp, lastReportTimestamp, - userMetrics, tasks); + userMetrics, tasks, regionCachedInfo); } private static class ServerMetricsImpl implements ServerMetrics { @@ -259,13 +267,15 @@ private static class ServerMetricsImpl implements ServerMetrics { private final long lastReportTimestamp; private final Map userMetrics; private final List tasks; + private final Map regionCachedInfo; ServerMetricsImpl(ServerName serverName, int versionNumber, String version, long requestCountPerSecond, long requestCount, long readRequestsCount, long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, int infoServerPort, List sources, ReplicationLoadSink sink, Map regionStatus, Set coprocessorNames, long reportTimestamp, - long lastReportTimestamp, Map userMetrics, List tasks) { + long lastReportTimestamp, Map userMetrics, List tasks, + Map regionCachedInfo) { this.serverName = Preconditions.checkNotNull(serverName); this.versionNumber = versionNumber; this.version = version; @@ -284,6 +294,7 @@ private static class ServerMetricsImpl implements ServerMetrics { this.reportTimestamp = reportTimestamp; this.lastReportTimestamp = lastReportTimestamp; this.tasks = tasks; + this.regionCachedInfo = regionCachedInfo; } @Override @@ -386,6 +397,11 @@ public List getTasks() { return tasks; } + @Override + public Map getRegionCachedInfo() { + return Collections.unmodifiableMap(regionCachedInfo); + } + @Override public String toString() { int storeCount = 0; diff --git a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto b/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto deleted file mode 100644 index a024b94baa62..000000000000 --- 
a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated"; -option java_outer_classname = "PersistentPrefetchProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - - -message PrefetchedHfileName { - map prefetched_files = 1; -} - -message RegionFileSizeMap { - required string region_name = 1; - required uint64 region_prefetch_size = 2; -} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 28cc5a865c23..58fd3c8d2a5b 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -177,6 +177,12 @@ message RegionLoad { MAJOR = 2; MAJOR_AND_MINOR = 3; } + + /** Total region size in MB */ + optional uint32 region_size_MB = 28; + + /** Current region cache ratio on this server */ + optional float current_region_cached_ratio = 29; } message UserLoad { @@ -315,6 +321,11 @@ message ServerLoad { * The active monitored tasks */ repeated ServerTask tasks = 15; + + /** + * The metrics for region cached on this region server + */ + map regionCachedInfo = 16; } message LiveServerInfo { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto index ae1980fe51e6..80fc10ada786 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto @@ -32,7 +32,7 @@ message BucketCacheEntry { map deserializers = 4; required BackingMap backing_map = 5; optional bytes checksum = 6; - map prefetched_files = 7; + map cached_files = 7; } message BackingMap { @@ -81,3 +81,9 @@ enum BlockPriority { multi = 1; memory = 2; } + +message RegionFileSizeMap { + required string region_name = 1; + required uint64 region_cached_size = 2; +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index e480c9b5789b..91ebaaabd422 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -20,6 +20,7 @@ import java.util.Iterator; import java.util.Map; import java.util.Optional; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; /** @@ -167,7 +168,7 
@@ default boolean isMetaBlock(BlockType blockType) { /** * Returns the list of fully cached files */ - default Optional> getFullyCachedFiles() { + default Optional>> getFullyCachedFiles() { return Optional.empty(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index a421dfc83aa0..1e0fe7709292 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -22,6 +22,7 @@ import java.util.Optional; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -431,7 +432,7 @@ public BlockCache[] getBlockCaches() { * Returns the list of fully cached files */ @Override - public Optional> getFullyCachedFiles() { + public Optional>> getFullyCachedFiles() { return this.l2Cache.getFullyCachedFiles(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 2079dcafb65f..7cdbd5aff486 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -115,7 +115,8 @@ public void run() { block.release(); } } - bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path.getName())); + final long fileSize = offset; + bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path, fileSize)); } catch (IOException e) { // IOExceptions are probably due to region closes (relocation, etc.) if (LOG.isTraceEnabled()) { @@ -132,7 +133,6 @@ public void run() { LOG.warn("Close prefetch stream reader failed, path: " + path, e); } } - String regionName = getRegionName(path); PrefetchExecutor.complete(path); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java deleted file mode 100644 index df67e4429a2d..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.hfile; - -import java.util.HashMap; -import java.util.Map; -import org.apache.hadoop.hbase.util.Pair; - -import org.apache.hadoop.hbase.shaded.protobuf.generated.PersistentPrefetchProtos; - -final class PrefetchProtoUtils { - private PrefetchProtoUtils() { - } - - static PersistentPrefetchProtos.PrefetchedHfileName - toPB(Map> prefetchedHfileNames) { - Map tmpMap = new HashMap<>(); - prefetchedHfileNames.forEach((hFileName, regionPrefetchMap) -> { - PersistentPrefetchProtos.RegionFileSizeMap tmpRegionFileSize = - PersistentPrefetchProtos.RegionFileSizeMap.newBuilder() - .setRegionName(regionPrefetchMap.getFirst()) - .setRegionPrefetchSize(regionPrefetchMap.getSecond()).build(); - tmpMap.put(hFileName, tmpRegionFileSize); - }); - return PersistentPrefetchProtos.PrefetchedHfileName.newBuilder().putAllPrefetchedFiles(tmpMap) - .build(); - } - - static Map> - fromPB(Map prefetchHFileNames) { - Map> hFileMap = new HashMap<>(); - prefetchHFileNames.forEach((hFileName, regionPrefetchMap) -> { - hFileMap.put(hFileName, - new Pair<>(regionPrefetchMap.getRegionName(), regionPrefetchMap.getRegionPrefetchSize())); - }); - return hFileMap; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index e3d740383085..ca7750c92c56 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.Iterator; @@ -51,6 +52,7 @@ import java.util.function.Consumer; import java.util.function.Function; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; @@ -79,6 +81,7 @@ import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef; import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool; import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool.ReferenceType; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -151,8 +154,17 @@ public class BucketCache implements BlockCache, HeapSize { private AtomicBoolean backingMapValidated = new AtomicBoolean(false); - /** Set of files for which prefetch is completed */ - final Map fullyCachedFiles = new ConcurrentHashMap<>(); + /** + * Map of hFile -> Region -> File size. This map is used to track all files completed prefetch, + * together with the region those belong to and the total cached size for the + * region.TestBlockEvictionOnRegionMovement + */ + final Map> fullyCachedFiles = new ConcurrentHashMap<>(); + /** + * Map of region -> total size of the region prefetched on this region server. 
This is the total + * size of hFiles for this region prefetched on this region server + */ + final Map regionCachedSizeMap = new ConcurrentHashMap<>(); private BucketCachePersister cachePersister; @@ -546,7 +558,6 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach } else { this.blockNumber.increment(); this.heapSize.add(cachedItem.heapSize()); - blocksByHFile.add(cacheKey); } } @@ -636,15 +647,11 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary()); } if (ioEngine.isPersistent()) { - fullyCachedFiles.remove(cacheKey.getHfileName()); + removeFileFromPrefetch(cacheKey.getHfileName()); setCacheInconsistent(true); } } - public void fileCacheCompleted(String fileName) { - fullyCachedFiles.put(fileName, true); - } - /** * Free the {{@link BucketEntry} actually,which could only be invoked when the * {@link BucketEntry#refCnt} becoming 0. @@ -1300,6 +1307,10 @@ public boolean isCachePersistent() { return ioEngine.isPersistent() && persistencePath != null; } + public Map getRegionCachedInfo() { + return Collections.unmodifiableMap(regionCachedSizeMap); + } + /** * @see #persistToFile() */ @@ -1337,6 +1348,29 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { } } + private void updateRegionSizeMapWhileRetrievingFromFile() { + // Update the regionCachedSizeMap with the region size while restarting the region server + if (LOG.isDebugEnabled()) { + LOG.debug("Updating region size map after retrieving cached file list"); + dumpPrefetchList(); + } + regionCachedSizeMap.clear(); + fullyCachedFiles.forEach((hFileName, hFileSize) -> { + // Get the region name for each file + String regionEncodedName = hFileSize.getFirst(); + long cachedFileSize = hFileSize.getSecond(); + regionCachedSizeMap.merge(regionEncodedName, cachedFileSize, + (oldpf, fileSize) -> oldpf + fileSize); + }); + } + + private void dumpPrefetchList() { + for (Map.Entry> outerEntry : fullyCachedFiles.entrySet()) { + LOG.debug("Cached File Entry:<{},<{},{}>>", outerEntry.getKey(), + outerEntry.getValue().getFirst(), outerEntry.getValue().getSecond()); + } + } + /** * Create an input stream that deletes the file after reading it. 
Use in try-with-resources to * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions: @@ -1401,7 +1435,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), this::createRecycler); fullyCachedFiles.clear(); - fullyCachedFiles.putAll(proto.getPrefetchedFilesMap()); + fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap())); if (proto.hasChecksum()) { try { ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), @@ -1444,6 +1478,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio LOG.info("Persistent file is old format, it does not support verifying file integrity!"); backingMapValidated.set(true); } + updateRegionSizeMapWhileRetrievingFromFile(); verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), proto.getMapClass()); } @@ -1581,7 +1616,7 @@ protected String getAlgorithm() { */ @Override public int evictBlocksByHfileName(String hfileName) { - this.fullyCachedFiles.remove(hfileName); + removeFileFromPrefetch(hfileName); Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); @@ -1966,7 +2001,7 @@ public AtomicBoolean getBackingMapValidated() { } @Override - public Optional> getFullyCachedFiles() { + public Optional>> getFullyCachedFiles() { return Optional.of(fullyCachedFiles); } @@ -1985,4 +2020,33 @@ public static Optional getBucketCacheFromCacheConfig(CacheConfig ca return Optional.empty(); } + private void removeFileFromPrefetch(String hfileName) { + // Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted + if (fullyCachedFiles.containsKey(hfileName)) { + Pair regionEntry = fullyCachedFiles.get(hfileName); + String regionEncodedName = regionEntry.getFirst(); + long filePrefetchSize = regionEntry.getSecond(); + LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName); + regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize); + // If all the blocks for a region are evicted from the cache, remove the entry for that region + if ( + regionCachedSizeMap.containsKey(regionEncodedName) + && regionCachedSizeMap.get(regionEncodedName) == 0 + ) { + regionCachedSizeMap.remove(regionEncodedName); + } + } + fullyCachedFiles.remove(hfileName); + } + + public void fileCacheCompleted(Path filePath, long size) { + Pair pair = new Pair<>(); + // sets the region name + String regionName = filePath.getParent().getParent().getName(); + pair.setFirst(regionName); + pair.setSecond(size); + fullyCachedFiles.put(filePath.getName(), pair); + regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + fileSize); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index 8830e5d3255a..7cc5050506e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; @@ -28,6 +29,7 @@ 
import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; import org.apache.hadoop.hbase.io.hfile.HFileBlock; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; @@ -45,7 +47,7 @@ static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { .setIoClass(cache.ioEngine.getClass().getName()) .setMapClass(cache.backingMap.getClass().getName()) .putAllDeserializers(CacheableDeserializerIdManager.save()) - .putAllPrefetchedFiles(cache.fullyCachedFiles) + .putAllCachedFiles(toCachedPB(cache.fullyCachedFiles)) .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) .setChecksum(ByteString .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) @@ -185,4 +187,26 @@ private static BlockType fromPb(BucketCacheProtos.BlockType blockType) { throw new Error("Unrecognized BlockType."); } } + + static Map + toCachedPB(Map> prefetchedHfileNames) { + Map tmpMap = new HashMap<>(); + prefetchedHfileNames.forEach((hfileName, regionPrefetchMap) -> { + BucketCacheProtos.RegionFileSizeMap tmpRegionFileSize = + BucketCacheProtos.RegionFileSizeMap.newBuilder().setRegionName(regionPrefetchMap.getFirst()) + .setRegionCachedSize(regionPrefetchMap.getSecond()).build(); + tmpMap.put(hfileName, tmpRegionFileSize); + }); + return tmpMap; + } + + static Map> + fromPB(Map prefetchHFileNames) { + Map> hfileMap = new HashMap<>(); + prefetchHFileNames.forEach((hfileName, regionPrefetchMap) -> { + hfileMap.put(hfileName, + new Pair<>(regionPrefetchMap.getRegionName(), regionPrefetchMap.getRegionCachedSize())); + }); + return hfileMap; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 85721a354977..3042a2eae451 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -65,10 +65,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; import java.util.stream.Collectors; import javax.management.MalformedObjectNameException; import javax.servlet.http.HttpServlet; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.mutable.MutableFloat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -108,7 +110,9 @@ import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; +import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcClient; @@ -239,6 +243,9 @@ public class HRegionServer extends HBaseServerBase private static final Logger LOG = LoggerFactory.getLogger(HRegionServer.class); + int unitMB = 1024 * 1024; + int unitKB = 1024; + /** * For testing only! Set to true to skip notifying region assignment to master . 
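
The fileCacheCompleted(Path, long) bookkeeping added to BucketCache above keys fullyCachedFiles by store file name and derives the owning region from the file's location on disk, then rolls the cached bytes up into regionCachedSizeMap. A minimal sketch of that derivation, assuming the usual store file layout <root>/data/<namespace>/<table>/<region>/<family>/<hfile>; the path and sizes below are illustrative only:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.Pair;

    public class CachedFileBookkeepingSketch {
      public static void main(String[] args) {
        // illustrative store file path: .../<table>/<region>/<family>/<hfile>
        Path hFile = new Path("/hbase/data/default/usertable/2fd1f3b1e61b/cf/d41d8cd98f00");
        // the region encoded name sits two levels above the file, which is what
        // fileCacheCompleted relies on via getParent().getParent().getName()
        String regionEncodedName = hFile.getParent().getParent().getName();
        long cachedBytes = 64L * 1024 * 1024;

        // hfile name -> (region it belongs to, bytes of that file held in the cache)
        Map<String, Pair<String, Long>> fullyCachedFiles = new HashMap<>();
        // region -> total bytes cached for that region on this server
        Map<String, Long> regionCachedSizeMap = new HashMap<>();

        fullyCachedFiles.put(hFile.getName(), new Pair<>(regionEncodedName, cachedBytes));
        regionCachedSizeMap.merge(regionEncodedName, cachedBytes, Long::sum);
        System.out.println(regionEncodedName + " -> " + regionCachedSizeMap.get(regionEncodedName));
      }
    }
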
*/ @@ -1211,6 +1218,11 @@ private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, lon serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build()); } } + computeIfPersistentBucketCache(bc -> { + bc.getRegionCachedInfo().forEach((regionName, prefetchSize) -> { + serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, unitMB)); + }); + }); serverLoad.setReportStartTime(reportStartTime); serverLoad.setReportEndTime(reportEndTime); if (this.infoServer != null) { @@ -1510,6 +1522,15 @@ private static int roundSize(long sizeInByte, int sizeUnit) { } } + private void computeIfPersistentBucketCache(Consumer computation) { + if (blockCache instanceof CombinedBlockCache) { + BlockCache l2 = ((CombinedBlockCache) blockCache).getSecondLevelCache(); + if (l2 instanceof BucketCache && ((BucketCache) l2).isCachePersistent()) { + computation.accept((BucketCache) l2); + } + } + } + /** * @param r Region to get RegionLoad for. * @param regionLoadBldr the RegionLoad.Builder, can be null @@ -1519,6 +1540,7 @@ private static int roundSize(long sizeInByte, int sizeUnit) { RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, RegionSpecifier.Builder regionSpecifier) throws IOException { byte[] name = r.getRegionInfo().getRegionName(); + String regionEncodedName = r.getRegionInfo().getEncodedName(); int stores = 0; int storefiles = 0; int storeRefCount = 0; @@ -1531,6 +1553,7 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, long totalStaticBloomSize = 0L; long totalCompactingKVs = 0L; long currentCompactedKVs = 0L; + long totalRegionSize = 0L; List storeList = r.getStores(); stores += storeList.size(); for (HStore store : storeList) { @@ -1542,6 +1565,7 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); storeUncompressedSize += store.getStoreSizeUncompressed(); storefileSize += store.getStorefilesSize(); + totalRegionSize += store.getHFilesSize(); // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB? storefileIndexSize += store.getStorefilesRootLevelIndexSize(); CompactionProgress progress = store.getCompactionProgress(); @@ -1554,9 +1578,6 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, totalStaticBloomSize += store.getTotalStaticBloomSize(); } - int unitMB = 1024 * 1024; - int unitKB = 1024; - int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB); int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB); int storefileSizeMB = roundSize(storefileSize, unitMB); @@ -1564,6 +1585,16 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB); int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB); int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB); + int regionSizeMB = roundSize(totalRegionSize, unitMB); + final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f); + computeIfPersistentBucketCache(bc -> { + if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) { + currentRegionCachedRatio.setValue(regionSizeMB == 0 + ? 
0.0f + : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), unitMB) + / regionSizeMB); + } + }); HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution(); float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname()); @@ -1594,7 +1625,8 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, .setDataLocalityForSsd(dataLocalityForSsd).setBlocksLocalWeight(blocksLocalWeight) .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight).setBlocksTotalWeight(blocksTotalWeight) .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState())) - .setLastMajorCompactionTs(r.getOldestHfileTs(true)); + .setLastMajorCompactionTs(r.getOldestHfileTs(true)).setRegionSizeMB(regionSizeMB) + .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue()); r.setCompleteSequenceId(regionLoadBldr); return regionLoadBldr.build(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java index 8bcf3e600f88..8dfb8b1a4632 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java @@ -60,6 +60,10 @@ public void testRegionLoadAggregation() { metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getCpRequestCount()).sum()); assertEquals(300, metrics.getRegionMetrics().values().stream() .mapToLong(v -> v.getFilteredReadRequestCount()).sum()); + assertEquals(2, metrics.getRegionMetrics().values().stream() + .mapToLong(v -> (long) v.getCurrentRegionCachedRatio()).count()); + assertEquals(150, metrics.getRegionMetrics().values().stream() + .mapToDouble(v -> v.getRegionSizeMB().get(Size.Unit.MEGABYTE)).sum(), 0); } @Test @@ -99,12 +103,14 @@ private ClusterStatusProtos.ServerLoad createServerLoadProto() { ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10) .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520) .setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201) - .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build(); - ClusterStatusProtos.RegionLoad rlTwo = ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(rSpecTwo).setStores(3).setStorefiles(13).setStoreUncompressedSizeMB(23) - .setStorefileSizeMB(300).setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40) - .setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE) - .setWriteRequestsCount(Integer.MAX_VALUE).setCpRequestsCount(100).build(); + .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE) + .setRegionSizeMB(100).setCurrentRegionCachedRatio(0.9f).build(); + ClusterStatusProtos.RegionLoad rlTwo = + ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3) + .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300) + .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303) + .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE) + .setCpRequestsCount(100).setRegionSizeMB(50).setCurrentRegionCachedRatio(1.0f).build(); ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() .addRegionLoads(rlOne).addRegionLoads(rlTwo).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java index 594db4d7c303..31fcf9fd47f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java @@ -400,6 +400,10 @@ public List getTasks() { return null; } + @Override + public Map getRegionCachedInfo() { + return new HashMap<>(); + } }; return serverMetrics; } @@ -541,6 +545,16 @@ public long getBlocksTotalWeight() { public CompactionState getCompactionState() { return null; } + + @Override + public Size getRegionSizeMB() { + return null; + } + + @Override + public float getCurrentRegionCachedRatio() { + return 0.0f; + } }; return regionMetrics; } From a6bc2107624a9b8c1d66e239c33d454413a1c2e3 Mon Sep 17 00:00:00 2001 From: judilsteve <4328790+judilsteve@users.noreply.github.com> Date: Sat, 18 Nov 2023 02:23:11 +0800 Subject: [PATCH 143/514] HBASE-28174 (DELETE endpoint in REST API does not support deleting binary row keys/columns Signed-off-by: Wellington Chevreuil --- .../apache/hadoop/hbase/rest/Constants.java | 1 + .../hadoop/hbase/rest/MultiRowResource.java | 9 ++- .../apache/hadoop/hbase/rest/RowResource.java | 4 +- .../org/apache/hadoop/hbase/rest/RowSpec.java | 33 ++++++++ .../hadoop/hbase/rest/TableResource.java | 16 +++- .../hadoop/hbase/rest/RowResourceBase.java | 54 +++++++++++++ .../hadoop/hbase/rest/TestDeleteRow.java | 34 +++++++++ .../hbase/rest/TestGetAndPutResource.java | 68 +++++++++++++++++ .../hbase/rest/TestMultiRowResource.java | 71 ++++++++++++++++++ .../asciidoc/_chapters/external_apis.adoc | 75 +++++++++++++++++++ 10 files changed, 357 insertions(+), 8 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index af8b9e303bdf..f0d1edc986af 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -85,6 +85,7 @@ public interface Constants { String CUSTOM_FILTERS = "hbase.rest.custom.filters"; String ROW_KEYS_PARAM_NAME = "row"; + String KEY_ENCODING_QUERY_PARAM_NAME = "e"; /** * If this query parameter is present when processing row or scanner resources, it disables server * side block caching diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index cc5fb22265c8..82900135dc40 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -29,6 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.javax.ws.rs.GET; +import org.apache.hbase.thirdparty.javax.ws.rs.HeaderParam; import org.apache.hbase.thirdparty.javax.ws.rs.Produces; import org.apache.hbase.thirdparty.javax.ws.rs.core.Context; import org.apache.hbase.thirdparty.javax.ws.rs.core.MultivaluedMap; @@ -63,14 +64,18 @@ public MultiRowResource(TableResource tableResource, String versions, String col @GET @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) - public Response get(final @Context UriInfo uriInfo) { + public Response get(final @Context UriInfo uriInfo, + final @HeaderParam("Encoding") String keyEncodingHeader) { MultivaluedMap params = uriInfo.getQueryParameters(); + String keyEncoding = 
(keyEncodingHeader != null) + ? keyEncodingHeader + : params.getFirst(KEY_ENCODING_QUERY_PARAM_NAME); servlet.getMetrics().incrementRequests(1); try { CellSetModel model = new CellSetModel(); for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { - RowSpec rowSpec = new RowSpec(rk); + RowSpec rowSpec = new RowSpec(rk, keyEncoding); if (this.versions != null) { rowSpec.setMaxVersions(this.versions); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index b599b0b19491..1f0c75ae4814 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -72,10 +72,10 @@ public class RowResource extends ResourceBase { * Constructor */ public RowResource(TableResource tableResource, String rowspec, String versions, String check, - String returnResult) throws IOException { + String returnResult, String keyEncoding) throws IOException { super(); this.tableResource = tableResource; - this.rowspec = new RowSpec(rowspec); + this.rowspec = new RowSpec(rowspec, keyEncoding); if (versions != null) { this.rowspec.setMaxVersions(Integer.parseInt(versions)); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index c9993336fa14..e1559dd67233 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -20,6 +20,7 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.util.ArrayList; +import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -47,6 +48,10 @@ public class RowSpec { private int maxValues = Integer.MAX_VALUE; public RowSpec(String path) throws IllegalArgumentException { + this(path, null); + } + + public RowSpec(String path, String keyEncoding) throws IllegalArgumentException { int i = 0; while (path.charAt(i) == '/') { i++; @@ -55,6 +60,34 @@ public RowSpec(String path) throws IllegalArgumentException { i = parseColumns(path, i); i = parseTimestamp(path, i); i = parseQueryParams(path, i); + + if (keyEncoding != null) { + // See https://en.wikipedia.org/wiki/Base64#Variants_summary_table + Base64.Decoder decoder; + switch (keyEncoding) { + case "b64": + case "base64": + case "b64url": + case "base64url": + decoder = Base64.getUrlDecoder(); + break; + case "b64basic": + case "base64basic": + decoder = Base64.getDecoder(); + break; + default: + throw new IllegalArgumentException("unknown key encoding '" + keyEncoding + "'"); + } + this.row = decoder.decode(this.row); + if (this.endRow != null) { + this.endRow = decoder.decode(this.endRow); + } + TreeSet decodedColumns = new TreeSet<>(Bytes.BYTES_COMPARATOR); + for (byte[] encodedColumn : this.columns) { + decodedColumns.add(decoder.decode(encodedColumn)); + } + this.columns = decodedColumns; + } } private int parseRowKeys(final String path, int i) throws IllegalArgumentException { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index dbac4686520f..3a89d79a3986 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -35,6 +35,7 @@ import 
org.apache.hbase.thirdparty.javax.ws.rs.DefaultValue; import org.apache.hbase.thirdparty.javax.ws.rs.Encoded; +import org.apache.hbase.thirdparty.javax.ws.rs.HeaderParam; import org.apache.hbase.thirdparty.javax.ws.rs.Path; import org.apache.hbase.thirdparty.javax.ws.rs.PathParam; import org.apache.hbase.thirdparty.javax.ws.rs.QueryParam; @@ -94,9 +95,12 @@ public RowResource getRowResource( // We need the @Encoded decorator so Jersey won't urldecode before // the RowSpec constructor has a chance to parse final @PathParam("rowspec") @Encoded String rowspec, final @QueryParam("v") String versions, - final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) + final @QueryParam("check") String check, final @QueryParam("rr") String returnResult, + final @HeaderParam("Encoding") String keyEncodingHeader, + final @QueryParam(Constants.KEY_ENCODING_QUERY_PARAM_NAME) String keyEncodingQuery) throws IOException { - return new RowResource(this, rowspec, versions, check, returnResult); + String keyEncoding = (keyEncodingHeader != null) ? keyEncodingHeader : keyEncodingQuery; + return new RowResource(this, rowspec, versions, check, returnResult, keyEncoding); } @Path("{suffixglobbingspec: .*\\*/.+}") @@ -105,8 +109,12 @@ public RowResource getRowResourceWithSuffixGlobbing( // the RowSpec constructor has a chance to parse final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, final @QueryParam("v") String versions, final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { - return new RowResource(this, suffixglobbingspec, versions, check, returnResult); + final @QueryParam("rr") String returnResult, + final @HeaderParam("Encoding") String keyEncodingHeader, + final @QueryParam(Constants.KEY_ENCODING_QUERY_PARAM_NAME) String keyEncodingQuery) + throws IOException { + String keyEncoding = (keyEncodingHeader != null) ? 
keyEncodingHeader : keyEncodingQuery; + return new RowResource(this, suffixglobbingspec, versions, check, returnResult, keyEncoding); } @Path("{scanspec: .*[*]$}") diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index 27de4c5803c4..6373c8515e01 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -23,6 +23,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.StringWriter; +import java.util.Base64; import java.util.HashMap; import java.util.Map; import javax.xml.bind.JAXBContext; @@ -43,6 +44,8 @@ import org.apache.hadoop.hbase.rest.model.CellSetModel; import org.apache.hadoop.hbase.rest.model.RowModel; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -464,6 +467,15 @@ protected static Response getValueXML(String url) throws IOException { return response; } + protected static Response getValueXML(String url, Header[] headers) throws IOException { + Header[] fullHeaders = new Header[headers.length + 1]; + for (int i = 0; i < headers.length; i++) + fullHeaders[i] = headers[i]; + fullHeaders[headers.length] = new BasicHeader("Accept", Constants.MIMETYPE_XML); + Response response = client.get(url, fullHeaders); + return response; + } + protected static Response getValueJson(String url) throws IOException { Response response = client.get(url, Constants.MIMETYPE_JSON); return response; @@ -483,6 +495,28 @@ protected static Response deleteValue(String table, String row, String column) return response; } + protected static Response deleteValueB64(String table, String row, String column, + boolean useQueryString) throws IOException { + StringBuilder path = new StringBuilder(); + Base64.Encoder encoder = Base64.getUrlEncoder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(encoder.encodeToString(row.getBytes("UTF-8"))); + path.append('/'); + path.append(encoder.encodeToString(column.getBytes("UTF-8"))); + + Response response; + if (useQueryString) { + path.append("?e=b64"); + response = client.delete(path.toString()); + } else { + response = client.delete(path.toString(), new BasicHeader("Encoding", "b64")); + } + Thread.yield(); + return response; + } + protected static Response getValueXML(String table, String row, String column) throws IOException { StringBuilder path = new StringBuilder(); @@ -506,6 +540,26 @@ protected static Response deleteRow(String table, String row) throws IOException return response; } + protected static Response deleteRowB64(String table, String row, boolean useQueryString) + throws IOException { + StringBuilder path = new StringBuilder(); + Base64.Encoder encoder = Base64.getUrlEncoder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(encoder.encodeToString(row.getBytes("UTF-8"))); + + Response response; + if (useQueryString) { + path.append("?e=b64"); + response = client.delete(path.toString()); + } else { + response = client.delete(path.toString(), new BasicHeader("Encoding", "b64")); + } + Thread.yield(); + return response; + } + protected static Response getValueJson(String table, String row, String column) throws IOException { StringBuilder path = new StringBuilder(); diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java index 9d9d2f337699..9893a9ef67d2 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java @@ -100,4 +100,38 @@ public void testDeleteXML() throws IOException, JAXBException { assertEquals(404, response.getCode()); } + private void testDeleteB64XML(boolean useQueryString) throws IOException, JAXBException { + Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + assertEquals(200, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + + response = deleteValueB64(TABLE, ROW_1, COLUMN_1, useQueryString); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + + response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + response = deleteRowB64(TABLE, ROW_1, useQueryString); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_2); + assertEquals(404, response.getCode()); + } + + @Test + public void testDeleteB64XML() throws IOException, JAXBException { + testDeleteB64XML(/* useQueryString: */false); + testDeleteB64XML(/* useQueryString: */true); + } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index b2c45e8cbd78..d14c45e0532b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.io.StringWriter; import java.net.URLEncoder; +import java.util.Base64; import java.util.HashMap; import java.util.List; import javax.xml.bind.JAXBException; @@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.http.Header; +import org.apache.http.message.BasicHeader; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -333,6 +335,72 @@ public void testURLEncodedKey() throws IOException, JAXBException { checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); } + private void setupValue1() throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(TABLE); + path.append('/'); + path.append(ROW_1); + path.append('/'); + path.append(COLUMN_1); + Response response = putValueXML(path.toString(), TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + } + + private void checkValue1(Response getResponse) throws JAXBException { + assertEquals(Constants.MIMETYPE_XML, getResponse.getHeader("content-type")); + + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new 
ByteArrayInputStream(getResponse.getBody())); + assertEquals(1, cellSet.getRows().size()); + RowModel rowModel = cellSet.getRows().get(0); + assertEquals(ROW_1, new String(rowModel.getKey())); + assertEquals(1, rowModel.getCells().size()); + CellModel cell = rowModel.getCells().get(0); + assertEquals(COLUMN_1, new String(cell.getColumn())); + assertEquals(VALUE_1, new String(cell.getValue())); + } + + // See https://issues.apache.org/jira/browse/HBASE-28174 + @Test + public void testUrlB64EncodedKeyQueryParam() throws IOException, JAXBException { + setupValue1(); + + StringBuilder path = new StringBuilder(); + Base64.Encoder encoder = Base64.getUrlEncoder(); + path.append('/'); + path.append(TABLE); + path.append('/'); + path.append(encoder.encodeToString(ROW_1.getBytes("UTF-8"))); + path.append('/'); + path.append(encoder.encodeToString(COLUMN_1.getBytes("UTF-8"))); + path.append("?e=b64"); + Response response = getValueXML(path.toString()); + assertEquals(200, response.getCode()); + + checkValue1(response); + } + + // See https://issues.apache.org/jira/browse/HBASE-28174 + @Test + public void testUrlB64EncodedKeyHeader() throws IOException, JAXBException { + setupValue1(); + + StringBuilder path = new StringBuilder(); + Base64.Encoder encoder = Base64.getUrlEncoder(); + path.append('/'); + path.append(TABLE); + path.append('/'); + path.append(encoder.encodeToString(ROW_1.getBytes("UTF-8"))); + path.append('/'); + path.append(encoder.encodeToString(COLUMN_1.getBytes("UTF-8"))); + Response response = + getValueXML(path.toString(), new Header[] { new BasicHeader("Encoding", "b64") }); + assertEquals(200, response.getCode()); + + checkValue1(response); + } + @Test public void testNoSuchCF() throws IOException { final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA + ":"; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index 215d4f9c346c..3e7b4e5cf20c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -21,6 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; +import java.util.Base64; import java.util.Collection; import javax.xml.bind.JAXBContext; import javax.xml.bind.Marshaller; @@ -156,6 +157,76 @@ public void testMultiCellGetJSON() throws IOException { client.delete(row_6_url, extraHdr); } + private void checkMultiCellGetJSON(Response response) throws IOException { + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + + RowModel rowModel = cellSet.getRows().get(0); + assertEquals(ROW_1, new String(rowModel.getKey())); + assertEquals(1, rowModel.getCells().size()); + CellModel cell = rowModel.getCells().get(0); + assertEquals(COLUMN_1, new String(cell.getColumn())); + assertEquals(VALUE_1, new String(cell.getValue())); + + rowModel = cellSet.getRows().get(1); + assertEquals(ROW_2, new String(rowModel.getKey())); + assertEquals(1, rowModel.getCells().size()); + cell = rowModel.getCells().get(0); + assertEquals(COLUMN_2, new String(cell.getColumn())); + assertEquals(VALUE_2, new String(cell.getValue())); + } + + // See 
https://issues.apache.org/jira/browse/HBASE-28174 + @Test + public void testMultiCellGetJSONB64() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2; + + if (csrfEnabled) { + Response response = client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1)); + assertEquals(400, response.getCode()); + } + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); + + StringBuilder path = new StringBuilder(); + Base64.Encoder encoder = Base64.getUrlEncoder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(encoder.encodeToString(ROW_1.getBytes("UTF-8"))); + path.append("&row="); + path.append(encoder.encodeToString(ROW_2.getBytes("UTF-8"))); + path.append("&e=b64"); // Specify encoding via query string + + Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); + + checkMultiCellGetJSON(response); + + path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(encoder.encodeToString(ROW_1.getBytes("UTF-8"))); + path.append("&row="); + path.append(encoder.encodeToString(ROW_2.getBytes("UTF-8"))); + + Header[] headers = new Header[] { new BasicHeader("Accept", Constants.MIMETYPE_JSON), + new BasicHeader("Encoding", "b64") // Specify encoding via header + }; + response = client.get(path.toString(), headers); + + checkMultiCellGetJSON(response); + + client.delete(row_5_url, extraHdr); + client.delete(row_6_url, extraHdr); + } + @Test public void testMultiCellGetXML() throws IOException { String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; diff --git a/src/main/asciidoc/_chapters/external_apis.adoc b/src/main/asciidoc/_chapters/external_apis.adoc index 129094e1d1cc..2ea4113981ba 100644 --- a/src/main/asciidoc/_chapters/external_apis.adoc +++ b/src/main/asciidoc/_chapters/external_apis.adoc @@ -250,6 +250,32 @@ curl -vi -X GET \ -H "Accept: text/xml" \ "http://example.com:8000/users/row1/cf:a/" +|/_table_/_row_/_column:qualifier_?e=b64 +|GET +|Get the value of a single column using a binary rowkey and column name, encoded in https://datatracker.ietf.org/doc/html/rfc4648#section-5[URL-safe base64]. Returned values are Base-64 encoded. +|curl -vi -X GET \ + -H "Accept: text/xml" \ + "http://example.com:8000/users/cm93MQ/Y2Y6YQ?e=b64" + +curl -vi -X GET \ + -H "Accept: text/xml" \ + -H "Encoding: base64" \ + "http://example.com:8000/users/cm93MQ/Y2Y6YQ/" + +|/_table_/multiget?row=_row_&row=_row_/_column:qualifier_&row=... +|GET +|Multi-Get a combination of rows/columns. Values are Base-64 encoded. +|curl -vi -X GET \ + -H "Accept: text/xml" \ + "http://example.com:8000/users/multiget?row=row1&row=row2/cf:a" + +|/_table_/multiget?e=b64&row=_row_&row=_row_/_column:qualifier_&row=... +|GET +|Multi-Get a combination of rows/columns using binary rowkeys and column names, encoded in https://datatracker.ietf.org/doc/html/rfc4648#section-5[URL-safe base64]. Returned values are Base-64 encoded. +|curl -vi -X GET \ + -H "Accept: text/xml" \ + "http://example.com:8000/users/multiget?e=b64&row=cm93MQ&row=cm93Mg%2FY2Y6YQ" + |/_table_/_row_/_column:qualifier_/?v=_number_of_versions_ |GET |Multi-Get a specified number of versions of a given cell. Values are Base-64 encoded. 
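
The documentation rows above describe the new URL-safe base64 addressing for binary row keys and column names; the same convention applies to DELETE. A minimal client-side sketch using the hbase-rest Client, mirroring the calls exercised in the tests above and assuming the Client is already configured against the REST gateway; the table name "users", column "cf:a" and row bytes are illustrative:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.http.message.BasicHeader;

    public class RestBinaryDeleteSketch {
      static void deleteBinaryKey(Client client, byte[] rowKey) throws IOException {
        Base64.Encoder enc = Base64.getUrlEncoder();
        String columnPath = "/users/" + enc.encodeToString(rowKey) + "/"
          + enc.encodeToString("cf:a".getBytes(StandardCharsets.UTF_8));

        // delete a single binary column, flagging the key encoding via the query parameter
        Response colDelete = client.delete(columnPath + "?e=b64");
        // delete the whole row, flagging the key encoding via the Encoding header instead
        Response rowDelete = client.delete("/users/" + enc.encodeToString(rowKey),
          new BasicHeader("Encoding", "b64"));

        System.out.println(colDelete.getCode() + " / " + rowDelete.getCode());
      }
    }
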
@@ -259,6 +285,55 @@ curl -vi -X GET \ |=== + +.Endpoints for `Delete` Operations +[options="header", cols="2m,m,3d,6l"] +|=== +|Endpoint +|HTTP Verb +|Description +|Example + +|/_table_/_row_ +|DELETE +|Delete all columns of a single row. +|curl -vi -X DELETE \ + "http://example.com:8000/users/row1" + +|/_table_/_row_/_column_family_: +|DELETE +|Delete all columns of a single row and column family. +|curl -vi -X DELETE \ + "http://example.com:8000/users/row1/cf" + +|/_table_/_row_/_column:qualifier_/_timestamp_ +|DELETE +|Delete a single column. +|curl -vi -X DELETE \ + "http://example.com:8000/users/row1/cf:a/1458586888395" + +|/_table_/_row_/_column:qualifier_ +|DELETE +|Delete a single column. +|curl -vi -X DELETE \ + "http://example.com:8000/users/row1/cf:a" + +curl -vi -X DELETE \ + -H "Accept: text/xml" \ + "http://example.com:8000/users/row1/cf:a/" + +|/_table_/_row_/_column:qualifier_?e=b64 +|DELETE +|Delete a single column using a binary rowkey and column name, encoded in https://datatracker.ietf.org/doc/html/rfc4648#section-5[URL-safe base64]. +|curl -vi -X DELETE \ + "http://example.com:8000/users/cm93MQ/Y2Y6YQ?e=b64" + +curl -vi -X DELETE \ + -H "Encoding: base64" \ + "http://example.com:8000/users/cm93MQ/Y2Y6YQ/" + +|=== + .Endpoints for `Scan` Operations [options="header", cols="2m,m,3d,6l"] |=== From 9631af791e0df5744c6df0c90993e033d8a8aef4 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 20 Nov 2023 10:41:35 +0800 Subject: [PATCH 144/514] HBASE-28031 TestClusterScopeQuotaThrottle is still failing with broken WAL writer (#5529) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java index b34f722e2e78..c617c34800f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java @@ -75,6 +75,9 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); + // disable stream slow monitor check, as in this test we inject our own EnvironmentEdge + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.async.wal.min.slow.detect.count", + Integer.MAX_VALUE); TEST_UTIL.startMiniCluster(2); TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); QuotaCache.TEST_FORCE_REFRESH = true; From e88daed9fbf2993fdc7feb2ba0127baa451a92cd Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 21 Nov 2023 14:29:13 +0800 Subject: [PATCH 145/514] HBASE-28210 There could be holes in stack ids when loading procedures (#5531) Signed-off-by: Wellington Chevreuil --- .../hbase/procedure2/ProcedureExecutor.java | 10 +- .../hbase/procedure2/TestStackIdHoles.java | 228 ++++++++++++++++++ 2 files changed, 234 insertions(+), 4 deletions(-) create mode 100644 hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStackIdHoles.java diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 696dbb71b9f9..46ce065b8778 
100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -1695,9 +1695,6 @@ private void execProcedure(RootProcedureState procStack, } } - // Add the procedure to the stack - procStack.addRollbackStep(procedure); - // allows to kill the executor before something is stored to the wal. // useful to test the procedure recovery. if ( @@ -1715,7 +1712,12 @@ private void execProcedure(RootProcedureState procStack, // Commit the transaction even if a suspend (state may have changed). Note this append // can take a bunch of time to complete. if (procedure.needPersistence()) { - updateStoreOnExec(procStack, procedure, subprocs); + // Add the procedure to the stack + // See HBASE-28210 on why we need synchronized here + synchronized (procStack) { + procStack.addRollbackStep(procedure); + updateStoreOnExec(procStack, procedure, subprocs); + } } // if the store is not running we are aborting diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStackIdHoles.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStackIdHoles.java new file mode 100644 index 000000000000..4708df44745b --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStackIdHoles.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.LinkedHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase; +import org.apache.hadoop.hbase.procedure2.store.ProcedureTree; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.AtomicUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; + +/** + * Testcase for HBASE-28210, where we persist the procedure which has been inserted later to + * {@link RootProcedureState} first and then crash, and then cause holes in stack ids when loading, + * and finally fail the start up of master. 
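
Put differently, the rollback steps persisted for one root procedure must re-read as a contiguous 0..N sequence; if the sub procedure that received the larger stack id reaches the store first and the process dies before the earlier update is written, the loader sees a gap and treats the whole tree as corrupted, which is what the test below provokes. A self-contained illustration of that invariant (not HBase's actual ProcedureTree code):

    import java.util.Set;
    import java.util.TreeSet;

    public class StackIdHoleSketch {
      // the persisted stack ids of one root procedure must cover 0..max without gaps
      static boolean hasHoles(Set<Integer> persistedStackIds) {
        int expected = 0;
        for (int id : new TreeSet<>(persistedStackIds)) {
          if (id != expected++) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(hasHoles(Set.of(0, 1, 2))); // false: both updates reached the store
        System.out.println(hasHoles(Set.of(0, 2)));    // true: crash between the two updates
      }
    }
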
+ */ +@Category({ MasterTests.class, SmallTests.class }) +public class TestStackIdHoles { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestStackIdHoles.class); + + private final class DummyProcedureStore extends ProcedureStoreBase { + + private int numThreads; + + private final LinkedHashMap procMap = + new LinkedHashMap(); + + private final AtomicLong maxProcId = new AtomicLong(0); + + private final AtomicBoolean updated = new AtomicBoolean(false); + + @Override + public void start(int numThreads) throws IOException { + this.numThreads = numThreads; + setRunning(true); + } + + @Override + public void stop(boolean abort) { + } + + @Override + public int getNumThreads() { + return numThreads; + } + + @Override + public int setRunningProcedureCount(int count) { + return count; + } + + @Override + public void recoverLease() throws IOException { + } + + @Override + public void load(ProcedureLoader loader) throws IOException { + loader.setMaxProcId(maxProcId.get()); + ProcedureTree tree = ProcedureTree.build(procMap.values()); + loader.load(tree.getValidProcs()); + loader.handleCorrupted(tree.getCorruptedProcs()); + } + + @Override + public void insert(Procedure proc, Procedure[] subprocs) { + long max = proc.getProcId(); + synchronized (procMap) { + try { + procMap.put(proc.getProcId(), ProcedureUtil.convertToProtoProcedure(proc)); + if (subprocs != null) { + for (Procedure p : subprocs) { + procMap.put(p.getProcId(), ProcedureUtil.convertToProtoProcedure(p)); + max = Math.max(max, p.getProcId()); + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + AtomicUtils.updateMax(maxProcId, max); + } + + @Override + public void insert(Procedure[] procs) { + long max = -1; + synchronized (procMap) { + try { + for (Procedure p : procs) { + procMap.put(p.getProcId(), ProcedureUtil.convertToProtoProcedure(p)); + max = Math.max(max, p.getProcId()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + AtomicUtils.updateMax(maxProcId, max); + } + + @Override + public void update(Procedure proc) { + // inject a sleep to simulate the scenario in HBASE-28210 + if (proc.hasParent() && proc.getStackIndexes() != null) { + int lastStackId = proc.getStackIndexes()[proc.getStackIndexes().length - 1]; + try { + // sleep more times if the stack id is smaller + Thread.sleep(100L * (10 - lastStackId)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + // simulate the failure when updating the second sub procedure + if (!updated.compareAndSet(false, true)) { + procExec.stop(); + throw new RuntimeException("inject error"); + } + } + synchronized (procMap) { + try { + procMap.put(proc.getProcId(), ProcedureUtil.convertToProtoProcedure(proc)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + @Override + public void delete(long procId) { + synchronized (procMap) { + procMap.remove(procId); + } + } + + @Override + public void delete(Procedure parentProc, long[] subProcIds) { + synchronized (procMap) { + try { + procMap.put(parentProc.getProcId(), ProcedureUtil.convertToProtoProcedure(parentProc)); + for (long procId : subProcIds) { + procMap.remove(procId); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + @Override + public void delete(long[] procIds, int offset, int count) { + synchronized (procMap) { + for (int i = 0; i < count; i++) { + long procId = procIds[offset + i]; + procMap.remove(procId); + } + } + } + 
} + + private final HBaseCommonTestingUtil HBTU = new HBaseCommonTestingUtil(); + + private DummyProcedureStore procStore; + + private ProcedureExecutor procExec; + + @Before + public void setUp() throws IOException { + procStore = new DummyProcedureStore(); + procStore.start(4); + procExec = new ProcedureExecutor(HBTU.getConfiguration(), null, procStore); + procExec.init(4, true); + procExec.startWorkers(); + } + + @After + public void tearDown() { + procExec.stop(); + } + + public static class DummyProcedure extends NoopProcedure { + + @Override + protected Procedure[] execute(Void env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + return new Procedure[] { new NoopProcedure(), new NoopProcedure() }; + } + } + + @Test + public void testLoad() throws IOException { + procExec.submitProcedure(new DummyProcedure()); + // wait for the error + HBTU.waitFor(30000, () -> !procExec.isRunning()); + procExec = new ProcedureExecutor(HBTU.getConfiguration(), null, procStore); + // make sure there is no error while loading + procExec.init(4, true); + } +} From e799ee08be07d43f802c5f2624614630d80a0c9c Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 21 Nov 2023 18:42:42 +0000 Subject: [PATCH 146/514] HBASE-27999 Implement cache prefetch aware load balancer (#5527) this commit is part of the rebase of HBASE-28186 Signed-off-by: Wellington Chevreuil Signed-off-by: Tak Lon (Stephen) Wu Co-authored-by: Rahul Agarkar --- .../master/balancer/BalancerClusterState.java | 156 +++++- .../master/balancer/BalancerRegionLoad.java | 12 + .../master/balancer/BaseLoadBalancer.java | 3 +- .../balancer/CacheAwareLoadBalancer.java | 479 ++++++++++++++++++ .../balancer/StochasticLoadBalancer.java | 54 +- .../master/balancer/BalancerTestBase.java | 14 + .../balancer/TestStochasticLoadBalancer.java | 4 + .../org/apache/hadoop/hbase/HConstants.java | 12 + .../hbase/io/hfile/BlockCacheFactory.java | 13 +- .../hbase/io/hfile/bucket/BucketCache.java | 2 + .../io/hfile/bucket/PersistentIOEngine.java | 4 +- .../balancer/TestCacheAwareLoadBalancer.java | 397 +++++++++++++++ ...stCacheAwareLoadBalancerCostFunctions.java | 316 ++++++++++++ ...rWithStochasticLoadBalancerAsInternal.java | 2 + 14 files changed, 1432 insertions(+), 36 deletions(-) create mode 100644 hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancerCostFunctions.java diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java index a7ae8b4d1a5a..4b3809c107cb 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,6 +115,12 @@ class BalancerClusterState { private float[][] rackLocalities; // Maps localityType -> region -> 
[server|rack]Index with highest locality private int[][] regionsToMostLocalEntities; + // Maps region -> serverIndex -> regionCacheRatio of a region on a server + private Map, Float> regionIndexServerIndexRegionCachedRatio; + // Maps regionIndex -> serverIndex with best region cache ratio + private int[] regionServerIndexWithBestRegionCachedRatio; + // Maps regionName -> oldServerName -> cache ratio of the region on the old server + Map> regionCacheRatioOnOldServerMap; static class DefaultRackManager extends RackManager { @Override @@ -125,13 +132,20 @@ public String getRack(ServerName server) { BalancerClusterState(Map> clusterState, Map> loads, RegionHDFSBlockLocationFinder regionFinder, RackManager rackManager) { - this(null, clusterState, loads, regionFinder, rackManager); + this(null, clusterState, loads, regionFinder, rackManager, null); + } + + protected BalancerClusterState(Map> clusterState, + Map> loads, RegionHDFSBlockLocationFinder regionFinder, + RackManager rackManager, Map> oldRegionServerRegionCacheRatio) { + this(null, clusterState, loads, regionFinder, rackManager, oldRegionServerRegionCacheRatio); } @SuppressWarnings("unchecked") BalancerClusterState(Collection unassignedRegions, Map> clusterState, Map> loads, - RegionHDFSBlockLocationFinder regionFinder, RackManager rackManager) { + RegionHDFSBlockLocationFinder regionFinder, RackManager rackManager, + Map> oldRegionServerRegionCacheRatio) { if (unassignedRegions == null) { unassignedRegions = Collections.emptyList(); } @@ -145,6 +159,8 @@ public String getRack(ServerName server) { tables = new ArrayList<>(); this.rackManager = rackManager != null ? rackManager : new DefaultRackManager(); + this.regionCacheRatioOnOldServerMap = oldRegionServerRegionCacheRatio; + numRegions = 0; List> serversPerHostList = new ArrayList<>(); @@ -541,6 +557,142 @@ private void computeCachedLocalities() { } + /** + * Returns the size of hFiles from the most recent RegionLoad for region + */ + public int getTotalRegionHFileSizeMB(int region) { + Deque load = regionLoads[region]; + if (load == null) { + // This means, that the region has no actual data on disk + return 0; + } + return regionLoads[region].getLast().getRegionSizeMB(); + } + + /** + * Returns the weighted cache ratio of a region on the given region server + */ + public float getOrComputeWeightedRegionCacheRatio(int region, int server) { + return getTotalRegionHFileSizeMB(region) * getOrComputeRegionCacheRatio(region, server); + } + + /** + * Returns the amount by which a region is cached on a given region server. If the region is not + * currently hosted on the given region server, then find out if it was previously hosted there + * and return the old cache ratio. + */ + protected float getRegionCacheRatioOnRegionServer(int region, int regionServerIndex) { + float regionCacheRatio = 0.0f; + + // Get the current region cache ratio if the region is hosted on the server regionServerIndex + for (int regionIndex : regionsPerServer[regionServerIndex]) { + if (region != regionIndex) { + continue; + } + + Deque regionLoadList = regionLoads[regionIndex]; + + // The region is currently hosted on this region server. Get the region cache ratio for this + // region on this server + regionCacheRatio = + regionLoadList == null ? 0.0f : regionLoadList.getLast().getCurrentRegionCacheRatio(); + + return regionCacheRatio; + } + + // Region is not currently hosted on this server. Check if the region was cached on this + // server earlier. 
This can happen when the server was shutdown and the cache was persisted. + // Search using the region name and server name and not the index id and server id as these ids + // may change when a server is marked as dead or a new server is added. + String regionEncodedName = regions[region].getEncodedName(); + ServerName serverName = servers[regionServerIndex]; + if ( + regionCacheRatioOnOldServerMap != null + && regionCacheRatioOnOldServerMap.containsKey(regionEncodedName) + ) { + Pair cacheRatioOfRegionOnServer = + regionCacheRatioOnOldServerMap.get(regionEncodedName); + if (ServerName.isSameAddress(cacheRatioOfRegionOnServer.getFirst(), serverName)) { + regionCacheRatio = cacheRatioOfRegionOnServer.getSecond(); + if (LOG.isDebugEnabled()) { + LOG.debug("Old cache ratio found for region {} on server {}: {}", regionEncodedName, + serverName, regionCacheRatio); + } + } + } + return regionCacheRatio; + } + + /** + * Populate the maps containing information about how much a region is cached on a region server. + */ + private void computeRegionServerRegionCacheRatio() { + regionIndexServerIndexRegionCachedRatio = new HashMap<>(); + regionServerIndexWithBestRegionCachedRatio = new int[numRegions]; + + for (int region = 0; region < numRegions; region++) { + float bestRegionCacheRatio = 0.0f; + int serverWithBestRegionCacheRatio = 0; + for (int server = 0; server < numServers; server++) { + float regionCacheRatio = getRegionCacheRatioOnRegionServer(region, server); + if (regionCacheRatio > 0.0f || server == regionIndexToServerIndex[region]) { + // A region with cache ratio 0 on a server means nothing. Hence, just make a note of + // cache ratio only if the cache ratio is greater than 0. + Pair regionServerPair = new Pair<>(region, server); + regionIndexServerIndexRegionCachedRatio.put(regionServerPair, regionCacheRatio); + } + if (regionCacheRatio > bestRegionCacheRatio) { + serverWithBestRegionCacheRatio = server; + // If the server currently hosting the region has equal cache ratio to a historical + // server, consider the current server to keep hosting the region + bestRegionCacheRatio = regionCacheRatio; + } else if ( + regionCacheRatio == bestRegionCacheRatio && server == regionIndexToServerIndex[region] + ) { + // If two servers have same region cache ratio, then the server currently hosting the + // region + // should retain the region + serverWithBestRegionCacheRatio = server; + } + } + regionServerIndexWithBestRegionCachedRatio[region] = serverWithBestRegionCacheRatio; + Pair regionServerPair = + new Pair<>(region, regionIndexToServerIndex[region]); + float tempRegionCacheRatio = regionIndexServerIndexRegionCachedRatio.get(regionServerPair); + if (tempRegionCacheRatio > bestRegionCacheRatio) { + LOG.warn( + "INVALID CONDITION: region {} on server {} cache ratio {} is greater than the " + + "best region cache ratio {} on server {}", + regions[region].getEncodedName(), servers[regionIndexToServerIndex[region]], + tempRegionCacheRatio, bestRegionCacheRatio, servers[serverWithBestRegionCacheRatio]); + } + } + } + + protected float getOrComputeRegionCacheRatio(int region, int server) { + if ( + regionServerIndexWithBestRegionCachedRatio == null + || regionIndexServerIndexRegionCachedRatio.isEmpty() + ) { + computeRegionServerRegionCacheRatio(); + } + + Pair regionServerPair = new Pair<>(region, server); + return regionIndexServerIndexRegionCachedRatio.containsKey(regionServerPair) + ? 
regionIndexServerIndexRegionCachedRatio.get(regionServerPair) + : 0.0f; + } + + public int[] getOrComputeServerWithBestRegionCachedRatio() { + if ( + regionServerIndexWithBestRegionCachedRatio == null + || regionIndexServerIndexRegionCachedRatio.isEmpty() + ) { + computeRegionServerRegionCacheRatio(); + } + return regionServerIndexWithBestRegionCachedRatio; + } + /** * Maps region index to rack index */ diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java index ffb36cb8ca1a..33d00e3de862 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java @@ -34,6 +34,8 @@ class BalancerRegionLoad { private final long writeRequestsCount; private final int memStoreSizeMB; private final int storefileSizeMB; + private final int regionSizeMB; + private final float currentRegionPrefetchRatio; BalancerRegionLoad(RegionMetrics regionMetrics) { readRequestsCount = regionMetrics.getReadRequestCount(); @@ -41,6 +43,8 @@ class BalancerRegionLoad { writeRequestsCount = regionMetrics.getWriteRequestCount(); memStoreSizeMB = (int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE); storefileSizeMB = (int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE); + regionSizeMB = (int) regionMetrics.getRegionSizeMB().get(Size.Unit.MEGABYTE); + currentRegionPrefetchRatio = regionMetrics.getCurrentRegionCachedRatio(); } public long getReadRequestsCount() { @@ -62,4 +66,12 @@ public int getMemStoreSizeMB() { public int getStorefileSizeMB() { return storefileSizeMB; } + + public int getRegionSizeMB() { + return regionSizeMB; + } + + public float getCurrentRegionCacheRatio() { + return currentRegionPrefetchRatio; + } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a4560cc595a2..54516868a0a0 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -232,7 +232,8 @@ private BalancerClusterState createCluster(List servers, clusterState.put(server, Collections.emptyList()); } } - return new BalancerClusterState(regions, clusterState, null, this.regionFinder, rackManager); + return new BalancerClusterState(regions, clusterState, null, this.regionFinder, rackManager, + null); } private List findIdleServers(List servers) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java new file mode 100644 index 000000000000..d73769a3971b --- /dev/null +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java @@ -0,0 +1,479 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +/** An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns regions + * based on the amount they are cached on a given server. A region can move across the region + * servers whenever a region server shuts down or crashes. The region server preserves the cache + * periodically and restores the cache when it is restarted. This balancer implements a mechanism + * where it maintains the amount by which a region is cached on a region server. During balancer + * run, a region plan is generated that takes into account this cache information and tries to + * move the regions so that the cache minimally impacted. + */ + +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterMetrics; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Size; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class CacheAwareLoadBalancer extends StochasticLoadBalancer { + private static final Logger LOG = LoggerFactory.getLogger(CacheAwareLoadBalancer.class); + + private Configuration configuration; + + public enum GeneratorFunctionType { + LOAD, + CACHE_RATIO + } + + @Override + public synchronized void loadConf(Configuration configuration) { + this.configuration = configuration; + this.costFunctions = new ArrayList<>(); + super.loadConf(configuration); + } + + @Override + protected List createCandidateGenerators() { + List candidateGenerators = new ArrayList<>(2); + candidateGenerators.add(GeneratorFunctionType.LOAD.ordinal(), + new CacheAwareSkewnessCandidateGenerator()); + candidateGenerators.add(GeneratorFunctionType.CACHE_RATIO.ordinal(), + new CacheAwareCandidateGenerator()); + return candidateGenerators; + } + + @Override + protected List createCostFunctions(Configuration configuration) { + List costFunctions = new ArrayList<>(); + addCostFunction(costFunctions, new CacheAwareRegionSkewnessCostFunction(configuration)); + addCostFunction(costFunctions, new CacheAwareCostFunction(configuration)); + return costFunctions; + } + + private void addCostFunction(List costFunctions, CostFunction costFunction) { + if (costFunction.getMultiplier() > 0) { + costFunctions.add(costFunction); + } + } + + @Override + public void updateClusterMetrics(ClusterMetrics clusterMetrics) { + this.clusterStatus = clusterMetrics; + updateRegionLoad(); + } + + /** + * Collect the amount of region cached for all the regions from all the active region servers. 
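+ * The live server metrics are walked twice: the first pass records, for every region, the
+ * server currently hosting it together with the region's total hFile size, and the second pass
+ * uses ServerMetrics#getRegionCachedInfo() to find regions that a different server still holds
+ * in its persisted cache, storing the resulting ratio in regionCacheRatioOnOldServerMap.
+ * For example (illustrative numbers only): if a 10 MB region is hosted on server A with nothing
+ * cached there, while server B still reports 10 MB of that region in its cache, an entry of the
+ * form {regionEncodedName -> (B, 1.0f)} is recorded for it.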
+ */ + private void updateRegionLoad() { + loads = new HashMap<>(); + regionCacheRatioOnOldServerMap = new HashMap<>(); + Map> regionCacheRatioOnCurrentServerMap = new HashMap<>(); + + // Build current region cache statistics + clusterStatus.getLiveServerMetrics().forEach((ServerName sn, ServerMetrics sm) -> { + // Create a map of region and the server where it is currently hosted + sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> { + String regionEncodedName = RegionInfo.encodeRegionName(regionName); + + Deque rload = new ArrayDeque<>(); + + // Get the total size of the hFiles in this region + int regionSizeMB = (int) rm.getRegionSizeMB().get(Size.Unit.MEGABYTE); + + rload.add(new BalancerRegionLoad(rm)); + // Maintain a map of region and it's total size. This is needed to calculate the cache + // ratios for the regions cached on old region servers + regionCacheRatioOnCurrentServerMap.put(regionEncodedName, new Pair<>(sn, regionSizeMB)); + loads.put(regionEncodedName, rload); + }); + }); + + // Build cache statistics for the regions hosted previously on old region servers + clusterStatus.getLiveServerMetrics().forEach((ServerName sn, ServerMetrics sm) -> { + // Find if a region was previously hosted on a server other than the one it is currently + // hosted on. + sm.getRegionCachedInfo().forEach((String regionEncodedName, Integer regionSizeInCache) -> { + // If the region is found in regionCacheRatioOnCurrentServerMap, it is currently hosted on + // this server + if (regionCacheRatioOnCurrentServerMap.containsKey(regionEncodedName)) { + ServerName currentServer = + regionCacheRatioOnCurrentServerMap.get(regionEncodedName).getFirst(); + if (!ServerName.isSameAddress(currentServer, sn)) { + int regionSizeMB = + regionCacheRatioOnCurrentServerMap.get(regionEncodedName).getSecond(); + float regionCacheRatioOnOldServer = + regionSizeMB == 0 ? 
0.0f : (float) regionSizeInCache / regionSizeMB; + regionCacheRatioOnOldServerMap.put(regionEncodedName, + new Pair<>(sn, regionCacheRatioOnOldServer)); + } + } + }); + }); + } + + private RegionInfo getRegionInfoByEncodedName(BalancerClusterState cluster, String regionName) { + Optional regionInfoOptional = + Arrays.stream(cluster.regions).filter((RegionInfo ri) -> { + return regionName.equals(ri.getEncodedName()); + }).findFirst(); + + if (regionInfoOptional.isPresent()) { + return regionInfoOptional.get(); + } + return null; + } + + private class CacheAwareCandidateGenerator extends CandidateGenerator { + @Override + protected BalanceAction generate(BalancerClusterState cluster) { + // Move the regions to the servers they were previously hosted on based on the cache ratio + if ( + !regionCacheRatioOnOldServerMap.isEmpty() + && regionCacheRatioOnOldServerMap.entrySet().iterator().hasNext() + ) { + Map.Entry> regionCacheRatioServerMap = + regionCacheRatioOnOldServerMap.entrySet().iterator().next(); + // Get the server where this region was previously hosted + String regionEncodedName = regionCacheRatioServerMap.getKey(); + RegionInfo regionInfo = getRegionInfoByEncodedName(cluster, regionEncodedName); + if (regionInfo == null) { + LOG.warn("Region {} not found", regionEncodedName); + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return BalanceAction.NULL_ACTION; + } + if (regionInfo.isMetaRegion() || regionInfo.getTable().isSystemTable()) { + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return BalanceAction.NULL_ACTION; + } + int regionIndex = cluster.regionsToIndex.get(regionInfo); + int oldServerIndex = cluster.serversToIndex + .get(regionCacheRatioOnOldServerMap.get(regionEncodedName).getFirst().getAddress()); + if (oldServerIndex < 0) { + LOG.warn("Server previously hosting region {} not found", regionEncodedName); + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return BalanceAction.NULL_ACTION; + } + + float oldRegionCacheRatio = + cluster.getOrComputeRegionCacheRatio(regionIndex, oldServerIndex); + int currentServerIndex = cluster.regionIndexToServerIndex[regionIndex]; + float currentRegionCacheRatio = + cluster.getOrComputeRegionCacheRatio(regionIndex, currentServerIndex); + + BalanceAction action = generatePlan(cluster, regionIndex, currentServerIndex, + currentRegionCacheRatio, oldServerIndex, oldRegionCacheRatio); + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return action; + } + return BalanceAction.NULL_ACTION; + } + + private BalanceAction generatePlan(BalancerClusterState cluster, int regionIndex, + int currentServerIndex, float cacheRatioOnCurrentServer, int oldServerIndex, + float cacheRatioOnOldServer) { + return moveRegionToOldServer(cluster, regionIndex, currentServerIndex, + cacheRatioOnCurrentServer, oldServerIndex, cacheRatioOnOldServer) + ? getAction(currentServerIndex, regionIndex, oldServerIndex, -1) + : BalanceAction.NULL_ACTION; + } + + private boolean moveRegionToOldServer(BalancerClusterState cluster, int regionIndex, + int currentServerIndex, float cacheRatioOnCurrentServer, int oldServerIndex, + float cacheRatioOnOldServer) { + // Find if the region has already moved by comparing the current server index with the + // current server index. 
This can happen when other candidate generator has moved the region + if (currentServerIndex < 0 || oldServerIndex < 0) { + return false; + } + + float cacheRatioDiffThreshold = 0.6f; + + // Conditions for moving the region + + // If the region is fully cached on the old server, move the region back + if (cacheRatioOnOldServer == 1.0f) { + if (LOG.isDebugEnabled()) { + LOG.debug("Region {} moved to the old server {} as it is fully cached there", + cluster.regions[regionIndex].getEncodedName(), cluster.servers[oldServerIndex]); + } + return true; + } + + // Move the region back to the old server if it is cached equally on both the servers + if (cacheRatioOnCurrentServer == cacheRatioOnOldServer) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "Region {} moved from {} to {} as the region is cached {} equally on both servers", + cluster.regions[regionIndex].getEncodedName(), cluster.servers[currentServerIndex], + cluster.servers[oldServerIndex], cacheRatioOnCurrentServer); + } + return true; + } + + // If the region is not fully cached on either of the servers, move the region back to the + // old server if the region cache ratio on the current server is still much less than the old + // server + if ( + cacheRatioOnOldServer > 0.0f + && cacheRatioOnCurrentServer / cacheRatioOnOldServer < cacheRatioDiffThreshold + ) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "Region {} moved from {} to {} as region cache ratio {} is better than the current " + + "cache ratio {}", + cluster.regions[regionIndex].getEncodedName(), cluster.servers[currentServerIndex], + cluster.servers[oldServerIndex], cacheRatioOnCurrentServer, cacheRatioOnOldServer); + } + return true; + } + + if (LOG.isDebugEnabled()) { + LOG.debug( + "Region {} not moved from {} to {} with current cache ratio {} and old cache ratio {}", + cluster.regions[regionIndex], cluster.servers[currentServerIndex], + cluster.servers[oldServerIndex], cacheRatioOnCurrentServer, cacheRatioOnOldServer); + } + return false; + } + } + + private class CacheAwareSkewnessCandidateGenerator extends LoadCandidateGenerator { + @Override + BalanceAction pickRandomRegions(BalancerClusterState cluster, int thisServer, int otherServer) { + // First move all the regions which were hosted previously on some other server back to their + // old servers + if ( + !regionCacheRatioOnOldServerMap.isEmpty() + && regionCacheRatioOnOldServerMap.entrySet().iterator().hasNext() + ) { + // Get the first region index in the historical cache ratio list + Map.Entry> regionEntry = + regionCacheRatioOnOldServerMap.entrySet().iterator().next(); + String regionEncodedName = regionEntry.getKey(); + + RegionInfo regionInfo = getRegionInfoByEncodedName(cluster, regionEncodedName); + if (regionInfo == null) { + LOG.warn("Region {} does not exist", regionEncodedName); + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return BalanceAction.NULL_ACTION; + } + if (regionInfo.isMetaRegion() || regionInfo.getTable().isSystemTable()) { + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + return BalanceAction.NULL_ACTION; + } + + int regionIndex = cluster.regionsToIndex.get(regionInfo); + + // Get the current host name for this region + thisServer = cluster.regionIndexToServerIndex[regionIndex]; + + // Get the old server index + otherServer = cluster.serversToIndex.get(regionEntry.getValue().getFirst().getAddress()); + + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + + if (otherServer < 0) { + // The old server has been moved to other host and hence, the region 
cannot be moved back + // to the old server + if (LOG.isDebugEnabled()) { + LOG.debug( + "CacheAwareSkewnessCandidateGenerator: Region {} not moved to the old " + + "server {} as the server does not exist", + regionEncodedName, regionEntry.getValue().getFirst().getHostname()); + } + return BalanceAction.NULL_ACTION; + } + + if (LOG.isDebugEnabled()) { + LOG.debug( + "CacheAwareSkewnessCandidateGenerator: Region {} moved from {} to {} as it " + + "was hosted their earlier", + regionEncodedName, cluster.servers[thisServer].getHostname(), + cluster.servers[otherServer].getHostname()); + } + + return getAction(thisServer, regionIndex, otherServer, -1); + } + + if (thisServer < 0 || otherServer < 0) { + return BalanceAction.NULL_ACTION; + } + + int regionIndexToMove = pickLeastCachedRegion(cluster, thisServer); + if (regionIndexToMove < 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("CacheAwareSkewnessCandidateGenerator: No region found for movement"); + } + return BalanceAction.NULL_ACTION; + } + if (LOG.isDebugEnabled()) { + LOG.debug( + "CacheAwareSkewnessCandidateGenerator: Region {} moved from {} to {} as it is " + + "least cached on current server", + cluster.regions[regionIndexToMove].getEncodedName(), + cluster.servers[thisServer].getHostname(), cluster.servers[otherServer].getHostname()); + } + return getAction(thisServer, regionIndexToMove, otherServer, -1); + } + + private int pickLeastCachedRegion(BalancerClusterState cluster, int thisServer) { + float minCacheRatio = Float.MAX_VALUE; + int leastCachedRegion = -1; + for (int i = 0; i < cluster.regionsPerServer[thisServer].length; i++) { + int regionIndex = cluster.regionsPerServer[thisServer][i]; + + float cacheRatioOnCurrentServer = + cluster.getOrComputeRegionCacheRatio(regionIndex, thisServer); + if (cacheRatioOnCurrentServer < minCacheRatio) { + minCacheRatio = cacheRatioOnCurrentServer; + leastCachedRegion = regionIndex; + } + } + return leastCachedRegion; + } + } + + static class CacheAwareRegionSkewnessCostFunction extends CostFunction { + static final String REGION_COUNT_SKEW_COST_KEY = + "hbase.master.balancer.stochastic.regionCountCost"; + static final float DEFAULT_REGION_COUNT_SKEW_COST = 20; + private final DoubleArrayCost cost = new DoubleArrayCost(); + + CacheAwareRegionSkewnessCostFunction(Configuration conf) { + // Load multiplier should be the greatest as it is the most general way to balance data. 
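+ // The weight comes from hbase.master.balancer.stochastic.regionCountCost (default 20);
+ // setting it to 0 effectively disables this function, since addCostFunction() only registers
+ // cost functions whose multiplier is greater than zero.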
+ this.setMultiplier(conf.getFloat(REGION_COUNT_SKEW_COST_KEY, DEFAULT_REGION_COUNT_SKEW_COST)); + } + + @Override + void prepare(BalancerClusterState cluster) { + super.prepare(cluster); + cost.prepare(cluster.numServers); + cost.applyCostsChange(costs -> { + for (int i = 0; i < cluster.numServers; i++) { + costs[i] = cluster.regionsPerServer[i].length; + } + }); + } + + @Override + protected double cost() { + return cost.cost(); + } + + @Override + protected void regionMoved(int region, int oldServer, int newServer) { + cost.applyCostsChange(costs -> { + costs[oldServer] = cluster.regionsPerServer[oldServer].length; + costs[newServer] = cluster.regionsPerServer[newServer].length; + }); + } + + public final void updateWeight(double[] weights) { + weights[GeneratorFunctionType.LOAD.ordinal()] += cost(); + } + } + + static class CacheAwareCostFunction extends CostFunction { + private static final String CACHE_COST_KEY = "hbase.master.balancer.stochastic.cacheCost"; + private double cacheRatio; + private double bestCacheRatio; + + private static final float DEFAULT_CACHE_COST = 20; + + CacheAwareCostFunction(Configuration conf) { + boolean isPersistentCache = conf.get(BUCKET_CACHE_PERSISTENT_PATH_KEY) != null; + // Disable the CacheAwareCostFunction if the cached file list persistence is not enabled + this.setMultiplier( + !isPersistentCache ? 0.0f : conf.getFloat(CACHE_COST_KEY, DEFAULT_CACHE_COST)); + bestCacheRatio = 0.0; + cacheRatio = 0.0; + } + + @Override + void prepare(BalancerClusterState cluster) { + super.prepare(cluster); + cacheRatio = 0.0; + bestCacheRatio = 0.0; + + for (int region = 0; region < cluster.numRegions; region++) { + cacheRatio += cluster.getOrComputeWeightedRegionCacheRatio(region, + cluster.regionIndexToServerIndex[region]); + bestCacheRatio += cluster.getOrComputeWeightedRegionCacheRatio(region, + getServerWithBestCacheRatioForRegion(region)); + } + + cacheRatio = bestCacheRatio == 0 ? 1.0 : cacheRatio / bestCacheRatio; + if (LOG.isDebugEnabled()) { + LOG.debug("CacheAwareCostFunction: Cost: {}", 1 - cacheRatio); + } + } + + @Override + protected double cost() { + return scale(0, 1, 1 - cacheRatio); + } + + @Override + protected void regionMoved(int region, int oldServer, int newServer) { + double regionCacheRatioOnOldServer = + cluster.getOrComputeWeightedRegionCacheRatio(region, oldServer); + double regionCacheRatioOnNewServer = + cluster.getOrComputeWeightedRegionCacheRatio(region, newServer); + double cacheRatioDiff = regionCacheRatioOnNewServer - regionCacheRatioOnOldServer; + double normalizedDelta = bestCacheRatio == 0.0 ? 
0.0 : cacheRatioDiff / bestCacheRatio; + cacheRatio += normalizedDelta; + if (LOG.isDebugEnabled() && (cacheRatio < 0.0 || cacheRatio > 1.0)) { + LOG.debug( + "CacheAwareCostFunction:regionMoved:region:{}:from:{}:to:{}:regionCacheRatioOnOldServer:{}:" + + "regionCacheRatioOnNewServer:{}:bestRegionCacheRatio:{}:cacheRatio:{}", + cluster.regions[region].getEncodedName(), cluster.servers[oldServer].getHostname(), + cluster.servers[newServer].getHostname(), regionCacheRatioOnOldServer, + regionCacheRatioOnNewServer, bestCacheRatio, cacheRatio); + } + } + + private int getServerWithBestCacheRatioForRegion(int region) { + return cluster.getOrComputeServerWithBestRegionCachedRatio()[region]; + } + + @Override + public final void updateWeight(double[] weights) { + weights[GeneratorFunctionType.CACHE_RATIO.ordinal()] += cost(); + } + } +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index edf049e8a718..e5cd5446c5c8 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -136,8 +137,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private long maxRunningTime = DEFAULT_MAX_RUNNING_TIME; private int numRegionLoadsToRemember = DEFAULT_KEEP_REGION_LOADS; private float minCostNeedBalance = DEFAULT_MIN_COST_NEED_BALANCE; + Map> regionCacheRatioOnOldServerMap = new HashMap<>(); - private List costFunctions; // FindBugs: Wants this protected; IS2_INCONSISTENT_SYNC + protected List costFunctions; // FindBugs: Wants this protected; + // IS2_INCONSISTENT_SYNC // To save currently configed sum of multiplier. 
Defaulted at 1 for cases that carry high cost private float sumMultiplier; // to save and report costs to JMX @@ -224,6 +227,24 @@ protected List createCandidateGenerators() { return candidateGenerators; } + protected List createCostFunctions(Configuration conf) { + List costFunctions = new ArrayList<>(); + addCostFunction(costFunctions, new RegionCountSkewCostFunction(conf)); + addCostFunction(costFunctions, new PrimaryRegionCountSkewCostFunction(conf)); + addCostFunction(costFunctions, new MoveCostFunction(conf, provider)); + addCostFunction(costFunctions, localityCost); + addCostFunction(costFunctions, rackLocalityCost); + addCostFunction(costFunctions, new TableSkewCostFunction(conf)); + addCostFunction(costFunctions, regionReplicaHostCostFunction); + addCostFunction(costFunctions, regionReplicaRackCostFunction); + addCostFunction(costFunctions, new ReadRequestCostFunction(conf)); + addCostFunction(costFunctions, new CPRequestCostFunction(conf)); + addCostFunction(costFunctions, new WriteRequestCostFunction(conf)); + addCostFunction(costFunctions, new MemStoreSizeCostFunction(conf)); + addCostFunction(costFunctions, new StoreFileCostFunction(conf)); + return costFunctions; + } + @Override protected void loadConf(Configuration conf) { super.loadConf(conf); @@ -242,20 +263,7 @@ protected void loadConf(Configuration conf) { regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf); regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf); - costFunctions = new ArrayList<>(); - addCostFunction(new RegionCountSkewCostFunction(conf)); - addCostFunction(new PrimaryRegionCountSkewCostFunction(conf)); - addCostFunction(new MoveCostFunction(conf, provider)); - addCostFunction(localityCost); - addCostFunction(rackLocalityCost); - addCostFunction(new TableSkewCostFunction(conf)); - addCostFunction(regionReplicaHostCostFunction); - addCostFunction(regionReplicaRackCostFunction); - addCostFunction(new ReadRequestCostFunction(conf)); - addCostFunction(new CPRequestCostFunction(conf)); - addCostFunction(new WriteRequestCostFunction(conf)); - addCostFunction(new MemStoreSizeCostFunction(conf)); - addCostFunction(new StoreFileCostFunction(conf)); + this.costFunctions = createCostFunctions(conf); loadCustomCostFunctions(conf); curFunctionCosts = new double[costFunctions.size()]; @@ -459,8 +467,8 @@ protected List balanceTable(TableName tableName, // The clusterState that is given to this method contains the state // of all the regions in the table(s) (that's true today) // Keep track of servers to iterate through them. 
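+ // regionCacheRatioOnOldServerMap is empty for the plain stochastic balancer; the cache aware
+ // subclass fills it in while processing cluster metrics, and handing it to the cluster state
+ // lets the cache-aware cost functions look up where a region was cached before it moved.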
- BalancerClusterState cluster = - new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); + BalancerClusterState cluster = new BalancerClusterState(loadOfOneTable, loads, finder, + rackManager, regionCacheRatioOnOldServerMap); long startTime = EnvironmentEdgeManager.currentTime(); @@ -568,7 +576,7 @@ protected List balanceTable(TableName tableName, return null; } - private void sendRejectionReasonToRingBuffer(Supplier reason, + protected void sendRejectionReasonToRingBuffer(Supplier reason, List costFunctions) { provider.recordBalancerRejection(() -> { BalancerRejection.Builder builder = new BalancerRejection.Builder().setReason(reason.get()); @@ -627,14 +635,14 @@ private void updateStochasticCosts(TableName tableName, double overall, double[] } } - private void addCostFunction(CostFunction costFunction) { + private void addCostFunction(List costFunctions, CostFunction costFunction) { float multiplier = costFunction.getMultiplier(); if (multiplier > 0) { costFunctions.add(costFunction); } } - private String functionCost() { + protected String functionCost() { StringBuilder builder = new StringBuilder(); for (CostFunction c : costFunctions) { builder.append(c.getClass().getSimpleName()); @@ -655,6 +663,12 @@ private String functionCost() { return builder.toString(); } + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + List getCostFunctions() { + return costFunctions; + } + private String totalCostsPerFunc() { StringBuilder builder = new StringBuilder(); for (CostFunction c : costFunctions) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 9ea1c94d1e09..4a996e7796f5 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.NavigableSet; @@ -376,6 +377,19 @@ protected TreeMap> mockClusterServers(int[] mockClu return servers; } + protected Map> mockClusterServersUnsorted(int[] mockCluster, + int numTables) { + int numServers = mockCluster.length; + Map> servers = new LinkedHashMap<>(); + for (int i = 0; i < numServers; i++) { + int numRegions = mockCluster[i]; + ServerAndLoad sal = randomServer(0); + List regions = randomRegions(numRegions, numTables); + servers.put(sal.getServerName(), regions); + } + return servers; + } + protected TreeMap> mockUniformClusterServers(int[] mockCluster) { int numServers = mockCluster.length; TreeMap> servers = new TreeMap<>(); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 21f3a3b66c9a..cc16cfe2ec83 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -139,6 +139,8 @@ private ServerMetrics mockServerMetricsWithCpRequests(List regionsOn when(rl.getWriteRequestCount()).thenReturn(0L); when(rl.getMemStoreSize()).thenReturn(Size.ZERO); 
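+ // BalancerRegionLoad now also reads the region size and the current cached ratio from
+ // RegionMetrics, so the two mocks added below stub those calls as well.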
when(rl.getStoreFileSize()).thenReturn(Size.ZERO); + when(rl.getRegionSizeMB()).thenReturn(Size.ZERO); + when(rl.getCurrentRegionCachedRatio()).thenReturn(0.0f); regionLoadMap.put(info.getRegionName(), rl); } when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap); @@ -213,6 +215,8 @@ public void testKeepRegionLoad() throws Exception { when(rl.getWriteRequestCount()).thenReturn(0L); when(rl.getMemStoreSize()).thenReturn(Size.ZERO); when(rl.getStoreFileSize()).thenReturn(new Size(i, Size.Unit.MEGABYTE)); + when(rl.getRegionSizeMB()).thenReturn(Size.ZERO); + when(rl.getCurrentRegionCachedRatio()).thenReturn(0.0f); Map regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 12479979b2ba..2aa9ecf69ec4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1335,6 +1335,18 @@ public enum OperationStatusCode { */ public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size"; + /** + * If the chosen ioengine can persist its state across restarts, the path to the file to persist + * to. This file is NOT the data file. It is a file into which we will serialize the map of what + * is in the data file. For example, if you pass the following argument as + * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), + * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file + * /tmp/bucketcache.data but the metadata on where the data is in the supplied file + * is an in-memory map that needs to be persisted across restarts. Where to store this in-memory + * state is what you supply here: e.g. /tmp/bucketcache.map. + */ + public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; + /** * HConstants for fast fail on the client side follow */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java index 38a296aad523..6956d584d92a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY; import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import java.io.IOException; @@ -47,18 +48,6 @@ public final class BlockCacheFactory { public static final String BLOCKCACHE_POLICY_KEY = "hfile.block.cache.policy"; public static final String BLOCKCACHE_POLICY_DEFAULT = "LRU"; - /** - * If the chosen ioengine can persist its state across restarts, the path to the file to persist - * to. This file is NOT the data file. It is a file into which we will serialize the map of what - * is in the data file. For example, if you pass the following argument as - * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), - * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file - * /tmp/bucketcache.data but the metadata on where the data is in the supplied file - * is an in-memory map that needs to be persisted across restarts. 
Where to store this in-memory - * state is what you supply here: e.g. /tmp/bucketcache.map. - */ - public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; - public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index ca7750c92c56..f321d034bc6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -347,6 +347,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck fullyCachedFiles.clear(); backingMapValidated.set(true); bucketAllocator = new BucketAllocator(capacity, bucketSizes); + regionCachedSizeMap.clear(); } } else { bucketAllocator = new BucketAllocator(capacity, bucketSizes); @@ -1517,6 +1518,7 @@ private void disableCache() { // If persistent ioengine and a path, we will serialize out the backingMap. this.backingMap.clear(); this.fullyCachedFiles.clear(); + this.regionCachedSizeMap.clear(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java index 495814fdc5fe..88b8d62923ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Shell; import org.apache.yetus.audience.InterfaceAudience; @@ -91,7 +92,8 @@ protected byte[] calculateChecksum(String algorithm) { private static long getFileSize(String filePath) throws IOException { DU.setExecCommand(filePath); DU.execute(); - return Long.parseLong(DU.getOutput().split("\t")[0]); + String size = DU.getOutput().split("\t")[0]; + return StringUtils.isEmpty(size.trim()) ? 0 : Long.parseLong(size); } private static class DuFileCommand extends Shell.ShellCommandExecutor { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java new file mode 100644 index 000000000000..3ecd8dc7cfd0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java @@ -0,0 +1,397 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterMetrics; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Size; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category({ LargeTests.class }) +public class TestCacheAwareLoadBalancer extends BalancerTestBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCacheAwareLoadBalancer.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestCacheAwareLoadBalancer.class); + + private static CacheAwareLoadBalancer loadBalancer; + + static List servers; + + static List tableDescs; + + static Map tableMap = new HashMap<>(); + + static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), + TableName.valueOf("dt3"), TableName.valueOf("dt4") }; + + private static List generateServers(int numServers) { + List servers = new ArrayList<>(numServers); + Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < numServers; i++) { + String host = "server" + rand.nextInt(100000); + int port = rand.nextInt(60000); + servers.add(ServerName.valueOf(host, port, -1)); + } + return servers; + } + + private static List constructTableDesc(boolean hasBogusTable) { + List tds = Lists.newArrayList(); + for (int i = 0; i < tables.length; i++) { + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build(); + tds.add(htd); + } + return tds; + } + + private ServerMetrics mockServerMetricsWithRegionCacheInfo(ServerName server, + List regionsOnServer, float currentCacheRatio, List oldRegionCacheInfo, + int oldRegionCachedSize, int regionSize) { + ServerMetrics serverMetrics = mock(ServerMetrics.class); + Map regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (RegionInfo 
info : regionsOnServer) { + RegionMetrics rl = mock(RegionMetrics.class); + when(rl.getReadRequestCount()).thenReturn(0L); + when(rl.getWriteRequestCount()).thenReturn(0L); + when(rl.getMemStoreSize()).thenReturn(Size.ZERO); + when(rl.getStoreFileSize()).thenReturn(Size.ZERO); + when(rl.getCurrentRegionCachedRatio()).thenReturn(currentCacheRatio); + when(rl.getRegionSizeMB()).thenReturn(new Size(regionSize, Size.Unit.MEGABYTE)); + regionLoadMap.put(info.getRegionName(), rl); + } + when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap); + Map oldCacheRatioMap = new HashMap<>(); + for (RegionInfo info : oldRegionCacheInfo) { + oldCacheRatioMap.put(info.getEncodedName(), oldRegionCachedSize); + } + when(serverMetrics.getRegionCachedInfo()).thenReturn(oldCacheRatioMap); + return serverMetrics; + } + + @BeforeClass + public static void beforeAllTests() throws Exception { + servers = generateServers(3); + tableDescs = constructTableDesc(false); + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "prefetch_file_list"); + loadBalancer = new CacheAwareLoadBalancer(); + loadBalancer.setClusterInfoProvider(new DummyClusterInfoProvider(conf)); + loadBalancer.loadConf(conf); + } + + @Test + public void testRegionsNotCachedOnOldServerAndCurrentServer() throws Exception { + // The regions are not cached on old server as well as the current server. This causes + // skewness in the region allocation which should be fixed by the balancer + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Simulate that the regions previously hosted by server1 are now hosted on server0 + List regionsOnServer0 = randomRegions(10); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(5); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock cluster metrics + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 0.0f, new ArrayList<>(), 0, 10)); + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + loadBalancer.updateClusterMetrics(clusterMetrics); + + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + List plans = loadBalancer.balanceCluster(LoadOfAllTable); + Set regionsMovedFromServer0 = new HashSet<>(); + Map> targetServers = new HashMap<>(); + for (RegionPlan plan : plans) { + if (plan.getSource().equals(server0)) { + regionsMovedFromServer0.add(plan.getRegionInfo()); + if (!targetServers.containsKey(plan.getDestination())) { + targetServers.put(plan.getDestination(), new ArrayList<>()); + } + targetServers.get(plan.getDestination()).add(plan.getRegionInfo()); + } + } + // should move 5 regions from server0 to server 1 + assertEquals(5, regionsMovedFromServer0.size()); + assertEquals(5, targetServers.get(server1).size()); + } + + @Test + public void testRegionsPartiallyCachedOnOldServerAndNotCachedOnCurrentServer() throws Exception { + // The regions are partially 
cached on old server but not cached on the current server + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Simulate that the regions previously hosted by server1 are now hosted on server0 + List regionsOnServer0 = randomRegions(10); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(5); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock cluster metrics + + // Mock 5 regions from server0 were previously hosted on server1 + List oldCachedRegions = regionsOnServer0.subList(5, regionsOnServer0.size() - 1); + + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 0.0f, oldCachedRegions, 6, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 0.0f, new ArrayList<>(), 0, 10)); + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + loadBalancer.updateClusterMetrics(clusterMetrics); + + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + List plans = loadBalancer.balanceCluster(LoadOfAllTable); + Set regionsMovedFromServer0 = new HashSet<>(); + Map> targetServers = new HashMap<>(); + for (RegionPlan plan : plans) { + if (plan.getSource().equals(server0)) { + regionsMovedFromServer0.add(plan.getRegionInfo()); + if (!targetServers.containsKey(plan.getDestination())) { + targetServers.put(plan.getDestination(), new ArrayList<>()); + } + targetServers.get(plan.getDestination()).add(plan.getRegionInfo()); + } + } + // should move 5 regions from server0 to server1 + assertEquals(5, regionsMovedFromServer0.size()); + assertEquals(5, targetServers.get(server1).size()); + assertTrue(targetServers.get(server1).containsAll(oldCachedRegions)); + } + + @Test + public void testRegionsFullyCachedOnOldServerAndNotCachedOnCurrentServers() throws Exception { + // The regions are fully cached on old server + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Simulate that the regions previously hosted by server1 are now hosted on server0 + List regionsOnServer0 = randomRegions(10); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(5); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock cluster metrics + + // Mock 5 regions from server0 were previously hosted on server1 + List oldCachedRegions = regionsOnServer0.subList(5, regionsOnServer0.size() - 1); + + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 0.0f, oldCachedRegions, 10, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 0.0f, new ArrayList<>(), 0, 10)); + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + 
when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + loadBalancer.updateClusterMetrics(clusterMetrics); + + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + List plans = loadBalancer.balanceCluster(LoadOfAllTable); + Set regionsMovedFromServer0 = new HashSet<>(); + Map> targetServers = new HashMap<>(); + for (RegionPlan plan : plans) { + if (plan.getSource().equals(server0)) { + regionsMovedFromServer0.add(plan.getRegionInfo()); + if (!targetServers.containsKey(plan.getDestination())) { + targetServers.put(plan.getDestination(), new ArrayList<>()); + } + targetServers.get(plan.getDestination()).add(plan.getRegionInfo()); + } + } + // should move 5 regions from server0 to server1 + assertEquals(5, regionsMovedFromServer0.size()); + assertEquals(5, targetServers.get(server1).size()); + assertTrue(targetServers.get(server1).containsAll(oldCachedRegions)); + } + + @Test + public void testRegionsFullyCachedOnOldAndCurrentServers() throws Exception { + // The regions are fully cached on old server + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Simulate that the regions previously hosted by server1 are now hosted on server0 + List regionsOnServer0 = randomRegions(10); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(5); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock cluster metrics + + // Mock 5 regions from server0 were previously hosted on server1 + List oldCachedRegions = regionsOnServer0.subList(5, regionsOnServer0.size() - 1); + + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 1.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 1.0f, oldCachedRegions, 10, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 1.0f, new ArrayList<>(), 0, 10)); + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + loadBalancer.updateClusterMetrics(clusterMetrics); + + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + List plans = loadBalancer.balanceCluster(LoadOfAllTable); + Set regionsMovedFromServer0 = new HashSet<>(); + Map> targetServers = new HashMap<>(); + for (RegionPlan plan : plans) { + if (plan.getSource().equals(server0)) { + regionsMovedFromServer0.add(plan.getRegionInfo()); + if (!targetServers.containsKey(plan.getDestination())) { + targetServers.put(plan.getDestination(), new ArrayList<>()); + } + targetServers.get(plan.getDestination()).add(plan.getRegionInfo()); + } + } + // should move 5 regions from server0 to server1 + assertEquals(5, regionsMovedFromServer0.size()); + assertEquals(5, targetServers.get(server1).size()); + assertTrue(targetServers.get(server1).containsAll(oldCachedRegions)); + } + + @Test + public void testRegionsPartiallyCachedOnOldServerAndCurrentServer() throws Exception { + // The regions are partially cached on old server + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Simulate that the regions previously hosted by 
server1 are now hosted on server0 + List regionsOnServer0 = randomRegions(10); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(5); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock cluster metrics + + // Mock 5 regions from server0 were previously hosted on server1 + List oldCachedRegions = regionsOnServer0.subList(5, regionsOnServer0.size() - 1); + + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 0.2f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 0.0f, oldCachedRegions, 6, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 1.0f, new ArrayList<>(), 0, 10)); + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + loadBalancer.updateClusterMetrics(clusterMetrics); + + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + List plans = loadBalancer.balanceCluster(LoadOfAllTable); + Set regionsMovedFromServer0 = new HashSet<>(); + Map> targetServers = new HashMap<>(); + for (RegionPlan plan : plans) { + if (plan.getSource().equals(server0)) { + regionsMovedFromServer0.add(plan.getRegionInfo()); + if (!targetServers.containsKey(plan.getDestination())) { + targetServers.put(plan.getDestination(), new ArrayList<>()); + } + targetServers.get(plan.getDestination()).add(plan.getRegionInfo()); + } + } + assertEquals(5, regionsMovedFromServer0.size()); + assertEquals(5, targetServers.get(server1).size()); + assertTrue(targetServers.get(server1).containsAll(oldCachedRegions)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancerCostFunctions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancerCostFunctions.java new file mode 100644 index 000000000000..448e576b1bc7 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancerCostFunctions.java @@ -0,0 +1,316 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.balancer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Pair; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestCacheAwareLoadBalancerCostFunctions extends StochasticBalancerTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCacheAwareLoadBalancerCostFunctions.class); + + // Mapping of test -> expected cache cost + private final float[] expectedCacheCost = { 0.0f, 0.0f, 0.5f, 1.0f, 0.0f, 0.572f, 0.0f, 0.075f }; + + /** + * Data set to testCacheCost: [test][0][0] = mapping of server to number of regions it hosts + * [test][region + 1][0] = server that region is hosted on [test][region + 1][server + 1] = size + * of region cached on server + */ + private final int[][][] clusterRegionCacheRatioMocks = new int[][][] { + // Test 1: each region is entirely on server that hosts it + // Cost of moving the regions in this case should be high as the regions are fully cached + // on the server they are currently hosted on + new int[][] { new int[] { 2, 1, 1 }, // Server 0 has 2, server 1 has 1 and server 2 has 1 + // region(s) hosted respectively + new int[] { 0, 100, 0, 0 }, // region 0 is hosted and cached only on server 0 + new int[] { 0, 100, 0, 0 }, // region 1 is hosted and cached only on server 0 + new int[] { 1, 0, 100, 0 }, // region 2 is hosted and cached only on server 1 + new int[] { 2, 0, 0, 100 }, // region 3 is hosted and cached only on server 2 + }, + + // Test 2: each region is cached completely on the server it is currently hosted on, + // but it was also cached on some other server historically + // Cost of moving the regions in this case should be high as the regions are fully cached + // on the server they are currently hosted on. Although, the regions were previously hosted and + // cached on some other server, since they are completely cached on the new server, + // there is no need to move the regions back to the previously hosting cluster + new int[][] { new int[] { 1, 2, 1 }, // Server 0 has 1, server 1 has 2 and server 2 has 1 + // region(s) hosted respectively + new int[] { 0, 100, 0, 100 }, // region 0 is hosted and currently cached on server 0, + // but previously cached completely on server 2 + new int[] { 1, 100, 100, 0 }, // region 1 is hosted and currently cached on server 1, + // but previously cached completely on server 0 + new int[] { 1, 0, 100, 100 }, // region 2 is hosted and currently cached on server 1, + // but previously cached on server 2 + new int[] { 2, 0, 100, 100 }, // region 3 is hosted and currently cached on server 2, + // but previously cached on server 1 + }, + + // Test 3: The regions were hosted and fully cached on a server but later moved to other + // because of server crash procedure. 
The regions are partially cached on the server they + // are currently hosted on + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 50, 0, 100 }, // Region 0 is currently + // hosted and partially + // cached on + // server 0, but was fully + // cached on server 2 + // previously + new int[] { 1, 100, 50, 0 }, // Region 1 is currently hosted and partially cached on + // server 1, but was fully cached on server 0 previously + new int[] { 1, 0, 50, 100 }, // Region 2 is currently hosted and partially cached on + // server 1, but was fully cached on server 2 previously + new int[] { 2, 0, 100, 50 }, // Region 3 is currently hosted and partially cached on + // server 2, but was fully cached on server 1 previously + }, + + // Test 4: The regions were hosted and fully cached on a server, but later moved to other + // server because of server crash procedure. The regions are not at all cached on the server + // they are currently hosted on + new int[][] { new int[] { 1, 1, 2 }, new int[] { 0, 0, 0, 100 }, // Region 0 is currently hosted + // but not cached on server + // 0, + // but was fully cached on + // server 2 previously + new int[] { 1, 100, 0, 0 }, // Region 1 is currently hosted but not cached on server 1, + // but was fully cached on server 0 previously + new int[] { 2, 0, 100, 0 }, // Region 2 is currently hosted but not cached on server 2, + // but was fully cached on server 1 previously + new int[] { 2, 100, 0, 0 }, // Region 3 is currently hosted but not cached on server 2, + // but was fully cached on server 1 previously + }, + + // Test 5: The regions were partially cached on old servers, before moving to the new server + // where also, they are partially cached + new int[][] { new int[] { 2, 1, 1 }, new int[] { 0, 50, 50, 0 }, // Region 0 is hosted and + // partially cached on + // server 0, but + // was previously hosted and + // partially cached on + // server 1 + new int[] { 0, 50, 0, 50 }, // Region 1 is hosted and partially cached on server 0, but + // was previously hosted and partially cached on server 2 + new int[] { 1, 0, 50, 50 }, // Region 2 is hosted and partially cached on server 1, but + // was previously hosted and partially cached on server 2 + new int[] { 2, 0, 50, 50 }, // Region 3 is hosted and partially cached on server 2, but + // was previously hosted and partially cached on server 1 + }, + + // Test 6: The regions are less cached on the new servers as compared to what they were + // cached on the server before they were moved to the new servers + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 30, 70, 0 }, // Region 0 is hosted and + // cached 30% on server 0, + // but was + // previously hosted and + // cached 70% on server 1 + new int[] { 1, 70, 30, 0 }, // Region 1 is hosted and cached 30% on server 1, but was + // previously hosted and cached 70% on server 0 + new int[] { 1, 0, 30, 70 }, // Region 2 is hosted and cached 30% on server 1, but was + // previously hosted and cached 70% on server 2 + new int[] { 2, 0, 70, 30 }, // Region 3 is hosted and cached 30% on server 2, but was + // previously hosted and cached 70% on server 1 + }, + + // Test 7: The regions are more cached on the new servers as compared to what they were + // cached on the server before they were moved to the new servers + new int[][] { new int[] { 2, 1, 1 }, new int[] { 0, 80, 20, 0 }, // Region 0 is hosted and 80% + // cached on server 0, but + // was + // previously hosted and 20% + // cached on server 1 + new int[] { 0, 80, 0, 20 }, // Region 1 is hosted and 80% cached on 
server 0, but was + // previously hosted and 20% cached on server 2 + new int[] { 1, 20, 80, 0 }, // Region 2 is hosted and 80% cached on server 1, but was + // previously hosted and 20% cached on server 0 + new int[] { 2, 0, 20, 80 }, // Region 3 is hosted and 80% cached on server 2, but was + // previously hosted and 20% cached on server 1 + }, + + // Test 8: The regions are randomly assigned to the server with some regions historically + // hosted on other region servers + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 34, 0, 58 }, // Region 0 is hosted and + // partially cached on + // server 0, + // but was previously hosted + // and partially cached on + // server 2 + // current cache ratio < + // historical cache ratio + new int[] { 1, 78, 100, 0 }, // Region 1 is hosted and fully cached on server 1, + // but was previously hosted and partially cached on server 0 + // current cache ratio > historical cache ratio + new int[] { 1, 66, 66, 0 }, // Region 2 is hosted and partially cached on server 1, + // but was previously hosted and partially cached on server 0 + // current cache ratio == historical cache ratio + new int[] { 2, 0, 0, 96 }, // Region 3 is hosted and partially cached on server 0 + // No historical cache ratio + }, }; + + private static Configuration storedConfiguration; + + private CacheAwareLoadBalancer loadBalancer = new CacheAwareLoadBalancer(); + + @BeforeClass + public static void saveInitialConfiguration() { + storedConfiguration = new Configuration(conf); + } + + @Before + public void beforeEachTest() { + conf = new Configuration(storedConfiguration); + loadBalancer.loadConf(conf); + } + + @Test + public void testVerifyCacheAwareSkewnessCostFunctionEnabled() { + CacheAwareLoadBalancer lb = new CacheAwareLoadBalancer(); + lb.loadConf(conf); + assertTrue(Arrays.asList(lb.getCostFunctionNames()) + .contains(CacheAwareLoadBalancer.CacheAwareRegionSkewnessCostFunction.class.getSimpleName())); + } + + @Test + public void testVerifyCacheAwareSkewnessCostFunctionDisabled() { + conf.setFloat( + CacheAwareLoadBalancer.CacheAwareRegionSkewnessCostFunction.REGION_COUNT_SKEW_COST_KEY, 0.0f); + + CacheAwareLoadBalancer lb = new CacheAwareLoadBalancer(); + lb.loadConf(conf); + + assertFalse(Arrays.asList(lb.getCostFunctionNames()) + .contains(CacheAwareLoadBalancer.CacheAwareRegionSkewnessCostFunction.class.getSimpleName())); + } + + @Test + public void testVerifyCacheCostFunctionEnabled() { + conf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "/tmp/prefetch.persistence"); + + CacheAwareLoadBalancer lb = new CacheAwareLoadBalancer(); + lb.loadConf(conf); + + assertTrue(Arrays.asList(lb.getCostFunctionNames()) + .contains(CacheAwareLoadBalancer.CacheAwareCostFunction.class.getSimpleName())); + } + + @Test + public void testVerifyCacheCostFunctionDisabledByNoBucketCachePersistence() { + assertFalse(Arrays.asList(loadBalancer.getCostFunctionNames()) + .contains(CacheAwareLoadBalancer.CacheAwareCostFunction.class.getSimpleName())); + } + + @Test + public void testVerifyCacheCostFunctionDisabledByNoMultiplier() { + conf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "/tmp/prefetch.persistence"); + conf.setFloat("hbase.master.balancer.stochastic.cacheCost", 0.0f); + assertFalse(Arrays.asList(loadBalancer.getCostFunctionNames()) + .contains(CacheAwareLoadBalancer.CacheAwareCostFunction.class.getSimpleName())); + } + + @Test + public void testCacheCost() { + conf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "/tmp/prefetch.persistence"); + 
CacheAwareLoadBalancer.CacheAwareCostFunction costFunction = + new CacheAwareLoadBalancer.CacheAwareCostFunction(conf); + + for (int test = 0; test < clusterRegionCacheRatioMocks.length; test++) { + int[][] clusterRegionLocations = clusterRegionCacheRatioMocks[test]; + MockClusterForCacheCost cluster = new MockClusterForCacheCost(clusterRegionLocations); + costFunction.prepare(cluster); + double cost = costFunction.cost(); + assertEquals(expectedCacheCost[test], cost, 0.01); + } + } + + private class MockClusterForCacheCost extends BalancerClusterState { + private final Map, Float> regionServerCacheRatio = new HashMap<>(); + + public MockClusterForCacheCost(int[][] regionsArray) { + // regions[0] is an array where index = serverIndex and value = number of regions + super(mockClusterServersUnsorted(regionsArray[0], 1), null, null, null, null); + Map> oldCacheRatio = new HashMap<>(); + for (int i = 1; i < regionsArray.length; i++) { + int regionIndex = i - 1; + for (int j = 1; j < regionsArray[i].length; j++) { + int serverIndex = j - 1; + float cacheRatio = (float) regionsArray[i][j] / 100; + regionServerCacheRatio.put(new Pair<>(regionIndex, serverIndex), cacheRatio); + if (cacheRatio > 0.0f && serverIndex != regionsArray[i][0]) { + // This is the historical cacheRatio value + oldCacheRatio.put(regions[regionIndex].getEncodedName(), + new Pair<>(servers[serverIndex], cacheRatio)); + } + } + } + regionCacheRatioOnOldServerMap = oldCacheRatio; + } + + @Override + public int getTotalRegionHFileSizeMB(int region) { + return 1; + } + + @Override + protected float getRegionCacheRatioOnRegionServer(int region, int regionServerIndex) { + float cacheRatio = 0.0f; + + // Get the cache ratio if the region is currently hosted on this server + if (regionServerIndex == regionIndexToServerIndex[region]) { + return regionServerCacheRatio.get(new Pair<>(region, regionServerIndex)); + } + + // Region is not currently hosted on this server. Check if the region was cached on this + // server earlier. This can happen when the server was shutdown and the cache was persisted. + // Search using the index name and server name and not the index id and server id as these + // ids may change when a server is marked as dead or a new server is added. 
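// For instance, if region 'r1' was 70% cached on server2 before it moved, regionCacheRatioOnOldServerMap
// holds "r1" -> Pair(server2, 0.7f); when server2's index is passed in here, that historical ratio is
// returned and the entry is then removed.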
+ String regionEncodedName = regions[region].getEncodedName(); + ServerName serverName = servers[regionServerIndex]; + if ( + regionCacheRatioOnOldServerMap != null + && regionCacheRatioOnOldServerMap.containsKey(regionEncodedName) + ) { + Pair serverCacheRatio = + regionCacheRatioOnOldServerMap.get(regionEncodedName); + if (ServerName.isSameAddress(serverName, serverCacheRatio.getFirst())) { + cacheRatio = serverCacheRatio.getSecond(); + regionCacheRatioOnOldServerMap.remove(regionEncodedName); + } + } + return cacheRatio; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java index 748045246b3b..67ef296da58b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java @@ -86,6 +86,8 @@ private ServerMetrics mockServerMetricsWithReadRequests(ServerName server, when(rl.getWriteRequestCount()).thenReturn(0L); when(rl.getMemStoreSize()).thenReturn(Size.ZERO); when(rl.getStoreFileSize()).thenReturn(Size.ZERO); + when(rl.getRegionSizeMB()).thenReturn(Size.ZERO); + when(rl.getCurrentRegionCachedRatio()).thenReturn(0.0f); regionLoadMap.put(info.getRegionName(), rl); } when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap); From b1ccf33382780c27d96340dcfd1aec312ed2cb73 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 22 Nov 2023 10:37:42 +0800 Subject: [PATCH 147/514] HBASE-28210 Addendum fix TestProcedureAdmin (#5532) --- .../hadoop/hbase/master/procedure/TestProcedureAdmin.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java index 452a73d26ee0..94539572a99c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java @@ -155,8 +155,7 @@ public void testAbortProcedureInterruptedNotAllowed() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo[] regions = - MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Submit a procedure @@ -164,6 +163,11 @@ public void testAbortProcedureInterruptedNotAllowed() throws Exception { .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, true)); // Wait for one step to complete ProcedureTestingUtility.waitProcedure(procExec, procId); + // After HBASE-28210, the injection of kill before update is moved before we add rollback + // step, so here we need to run two steps, otherwise we will not consider the procedure as + // executed + MasterProcedureTestingUtility.restartMasterProcedureExecutor(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); // Set 
the mayInterruptIfRunning flag to false boolean abortResult = procExec.abort(procId, false); From 1203c2014b52dfd25973e755bf2b1870b10d8859 Mon Sep 17 00:00:00 2001 From: Rahul Agarkar Date: Thu, 2 Nov 2023 22:15:38 +0530 Subject: [PATCH 148/514] =?UTF-8?q?HBASE-28097=20Add=20documentation=20sec?= =?UTF-8?q?tion=20for=20the=20Cache=20Aware=20balancer=20fu=E2=80=A6=20(#5?= =?UTF-8?q?495)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Wellington Chevreuil --- src/main/asciidoc/_chapters/architecture.adoc | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index 23d069c1d919..12bdc09ac764 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -1130,6 +1130,49 @@ For a RegionServer hosting data that can comfortably fit into cache, or if your The compressed BlockCache is disabled by default. To enable it, set `hbase.block.data.cachecompressed` to `true` in _hbase-site.xml_ on all RegionServers. +==== Cache Aware Load Balancer + +Depending on the data size and the configured cache size, the cache warm up can take anywhere from a few minutes to a few hours. This becomes even more critical for HBase deployments over cloud storage, where compute is separated from storage. Doing this everytime the region server starts can be a very expensive process. To eliminate this, link:https://issues.apache.org/jira/browse/HBASE-27313[HBASE-27313] implemented the cache persistence feature where the region servers periodically persist the blocks cached in the bucket cache. This persisted information is then used to resurrect the cache in the event of a region server restart because of normal restart or crash. + +link:https://issues.apache.org/jira/browse/HBASE-27999[HBASE-27999] implements the cache aware load balancer, which adds to the load balancer the ability to consider the cache allocation of each region on region servers when calculating a new assignment plan, using the region/region server cache allocation information reported by region servers to calculate the percentage of HFiles cached for each region on the hosting server. This information is then used by the balancer as a factor when deciding on an optimal, new assignment plan. + +The master node captures the caching information from all the region servers and uses this information to decide on new region assignments while ensuring a minimal impact on the current cache allocation. A region is assigned to the region server where it has a better cache ratio as compared to the region server where it is currently hosted. + +The CacheAwareLoadBalancer uses two cost elements for deciding the region allocation. These are described below: + +. Cache Cost ++ + +The cache cost is calculated as the percentage of data for a region cached on the region server where it is either currently hosted or was previously hosted. A region may have multiple HFiles, each of different sizes. A HFile is considered to be fully prefetched when all the data blocks in this file are in the cache. The region server hosting this region calculates the ratio of number of HFiles fully cached in the cache to the total number of HFiles in the region. This ratio will vary from 0 (region hosted on this server, but none of its HFiles are cached into the cache) to 1 (region hosted on this server and all the HFiles for this region are cached into the cache). 
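For instance, a region with four HFiles of which three are fully cached on a server reports a ratio of 0.75 there. A minimal illustrative sketch of this fraction (the class and method names below are hypothetical, not HBase code):

[source,java]
----
// Illustrative only: the fraction of a region's HFiles that are fully cached on a server.
// 0.0f means none of the region's files are cached there; 1.0f means all of them are.
public final class RegionCacheRatioExample {
  static float regionCacheRatio(int fullyCachedHFiles, int totalHFiles) {
    return totalHFiles == 0 ? 0.0f : (float) fullyCachedHFiles / totalHFiles;
  }

  public static void main(String[] args) {
    // A region with 3 of its 4 HFiles fully cached on this server scores 0.75.
    System.out.println(regionCacheRatio(3, 4));
  }
}
----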
++ +Every region server maintains this information for all the regions currently hosted there. In addition to that, this cache ratio is also maintained for the regions which were previously hosted on this region server giving historical information about the regions. + +. Skewness Cost ++ + + +The cache aware balancer will consider cache cost with the skewness cost to decide on the region assignment plan under following conditions: + +. There is an idle server in the cluster. This can happen when an existing server is restarted or a new server is added to the cluster. + +. When the cost of maintaining the balance in the cluster is greater than the minimum threshold defined by the configuration _hbase.master.balancer.stochastic.minCostNeedBalance_. + + +The CacheAwareLoadBalancer can be enabled in the cluster by setting the following configuration properties in the master master configuration: + +[source,xml] +---- + + hbase.master.loadbalancer.class + org.apache.hadoop.hbase.master.balancer.CacheAwareLoadBalancer + + + hbase.bucketcache.persistent.path + /path/to/bucketcache_persistent_file + +---- + + [[regionserver_splitting_implementation]] === RegionServer Splitting Implementation From 3e7230a24b63681ded7d9f45b065921965d52b9f Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Thu, 23 Nov 2023 17:11:50 +0000 Subject: [PATCH 149/514] HBASE-28176 PrefetchExecutor should stop once cache reaches capacity (#5474) Signed-off-by: Peter Somogyi --- .../hadoop/hbase/io/hfile/BlockCache.java | 76 ++++++++++++++- .../hbase/io/hfile/CombinedBlockCache.java | 51 ++++++++++ .../hbase/io/hfile/HFilePreadReader.java | 86 ++++++++++------- .../hbase/io/hfile/HFileReaderImpl.java | 4 +- .../hbase/io/hfile/bucket/BucketCache.java | 95 ++++++++++++++++++- .../hadoop/hbase/io/hfile/TestPrefetch.java | 1 - .../hbase/io/hfile/TestPrefetchRSClose.java | 12 +-- .../io/hfile/TestPrefetchWithBucketCache.java | 91 +++++++++++++++--- .../io/hfile/bucket/TestBucketCache.java | 3 - .../bucket/TestBucketCachePersister.java | 51 ++++++++-- .../hfile/bucket/TestPrefetchPersistence.java | 3 - 11 files changed, 397 insertions(+), 76 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 91ebaaabd422..a62ca853ca60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -20,6 +20,7 @@ import java.util.Iterator; import java.util.Map; import java.util.Optional; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @@ -166,7 +167,80 @@ default boolean isMetaBlock(BlockType blockType) { } /** - * Returns the list of fully cached files + * Notifies the cache implementation that the given file has been fully cached (all its blocks + * made into the cache). + * @param fileName the file that has been completely cached. + */ + default void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount, + long size) { + // noop + } + + /** + * Notifies the cache implementation that the given file had a block evicted + * @param fileName the file had a block evicted. + */ + default void notifyFileBlockEvicted(String fileName) { + // noop + } + + /** + * Checks whether there's enough space left in the cache to accommodate the passed block. 
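* BucketCache, for instance, treats a block as fitting while its current used size plus the block's on-disk size stays below the cache's acceptable size.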
This + * method may not be overridden by all implementing classes. In such cases, the returned Optional + * will be empty. For subclasses implementing this logic, the returned Optional would contain the + * boolean value reflecting if the passed block fits into the remaining cache space available. + * @param block the block we want to check if fits into the cache. + * @return empty optional if this method is not supported, otherwise the returned optional + * contains the boolean value informing if the block fits into the cache available space. + */ + default Optional blockFitsIntoTheCache(HFileBlock block) { + return Optional.empty(); + } + + /** + * Checks whether blocks for the passed file should be cached or not. This method may not be + * overridden by all implementing classes. In such cases, the returned Optional will be empty. For + * subclasses implementing this logic, the returned Optional would contain the boolean value + * reflecting if the passed file should indeed be cached. + * @param fileName to check if it should be cached. + * @return empty optional if this method is not supported, otherwise the returned optional + * contains the boolean value informing if the file should be cached. + */ + default Optional shouldCacheFile(String fileName) { + return Optional.empty(); + } + + /** + * Checks whether the block for the passed key is already cached. This method may not be + * overridden by all implementing classes. In such cases, the returned Optional will be empty. For + * subclasses implementing this logic, the returned Optional would contain the boolean value + * reflecting if the block for the passed key is already cached or not. + * @param key for the block we want to check if it's already in the cache. + * @return empty optional if this method is not supported, otherwise the returned optional + * contains the boolean value informing if the block is already cached. + */ + default Optional isAlreadyCached(BlockCacheKey key) { + return Optional.empty(); + } + + /** + * Returns an Optional containing the size of the block related to the passed key. If the block is + * not in the cache, returned optional will be empty. Also, this method may not be overridden by + * all implementing classes. In such cases, the returned Optional will be empty. + * @param key for the block we want to check if it's already in the cache. + * @return empty optional if this method is not supported, otherwise the returned optional + * contains the boolean value informing if the block is already cached. + */ + default Optional getBlockSize(BlockCacheKey key) { + return Optional.empty(); + } + + /** + * Returns an Optional containing the map of files that have been fully cached (all its blocks are + * present in the cache. This method may not be overridden by all implementing classes. In such + * cases, the returned Optional will be empty. + * @return empty optional if this method is not supported, otherwise the returned optional + * contains a map of all files that have been fully cached. 
*/ default Optional>> getFullyCachedFiles() { return Optional.empty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 1e0fe7709292..427f1771e669 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -20,6 +20,9 @@ import java.util.Iterator; import java.util.Map; import java.util.Optional; +import org.apache.commons.lang3.mutable.Mutable; +import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.util.Pair; @@ -454,4 +457,52 @@ public FirstLevelBlockCache getFirstLevelCache() { public BlockCache getSecondLevelCache() { return l2Cache; } + + @Override + public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount, + long size) { + l1Cache.getBlockCount(); + l1Cache.notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); + l2Cache.notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); + + } + + @Override + public void notifyFileBlockEvicted(String fileName) { + l1Cache.notifyFileBlockEvicted(fileName); + l1Cache.notifyFileBlockEvicted(fileName); + } + + @Override + public Optional blockFitsIntoTheCache(HFileBlock block) { + if (isMetaBlock(block.getBlockType())) { + return l1Cache.blockFitsIntoTheCache(block); + } else { + return l2Cache.blockFitsIntoTheCache(block); + } + } + + @Override + public Optional shouldCacheFile(String fileName) { + Optional l1Result = l1Cache.shouldCacheFile(fileName); + Optional l2Result = l2Cache.shouldCacheFile(fileName); + final Mutable combinedResult = new MutableBoolean(true); + l1Result.ifPresent(b -> combinedResult.setValue(b && combinedResult.getValue())); + l2Result.ifPresent(b -> combinedResult.setValue(b && combinedResult.getValue())); + return Optional.of(combinedResult.getValue()); + } + + @Override + public Optional isAlreadyCached(BlockCacheKey key) { + boolean result = + l1Cache.isAlreadyCached(key).orElseGet(() -> l2Cache.isAlreadyCached(key).orElse(false)); + return Optional.of(result); + } + + @Override + public Optional getBlockSize(BlockCacheKey key) { + Optional l1Result = l1Cache.getBlockSize(key); + return l1Result.isPresent() ? 
l1Result : l2Cache.getBlockSize(key); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 7cdbd5aff486..92f6a8169f32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -23,8 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; -import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,17 +37,15 @@ public class HFilePreadReader extends HFileReaderImpl { public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); - final MutableBoolean fileAlreadyCached = new MutableBoolean(false); - Optional bucketCacheOptional = - BucketCache.getBucketCacheFromCacheConfig(cacheConf); - bucketCacheOptional.flatMap(BucketCache::getFullyCachedFiles).ifPresent(fcf -> { - fileAlreadyCached.setValue(fcf.get(path.getName()) == null ? false : true); + final MutableBoolean shouldCache = new MutableBoolean(true); + + cacheConf.getBlockCache().ifPresent(cache -> { + Optional result = cache.shouldCacheFile(path.getName()); + shouldCache.setValue(result.isPresent() ? result.get().booleanValue() : true); }); + // Prefetch file blocks upon open if requested - if ( - cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() - && !fileAlreadyCached.booleanValue() - ) { + if (cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() && shouldCache.booleanValue()) { PrefetchExecutor.request(path, new Runnable() { @Override public void run() { @@ -70,34 +66,38 @@ public void run() { } // Don't use BlockIterator here, because it's designed to read load-on-open section. long onDiskSizeOfNextBlock = -1; + // if we are here, block cache is present anyways + BlockCache cache = cacheConf.getBlockCache().get(); + boolean interrupted = false; + int blockCount = 0; + int dataBlockCount = 0; while (offset < end) { if (Thread.interrupted()) { break; } - // BucketCache can be persistent and resilient to restarts, so we check first if the - // block exists on its in-memory index, if so, we just update the offset and move on - // to the next block without actually going read all the way to the cache. - if (bucketCacheOptional.isPresent()) { - BucketCache cache = bucketCacheOptional.get(); - if (cache.getBackingMapValidated().get()) { - BlockCacheKey cacheKey = new BlockCacheKey(name, offset); - BucketEntry entry = cache.getBackingMap().get(cacheKey); - if (entry != null) { - cacheKey = new BlockCacheKey(name, offset); - entry = cache.getBackingMap().get(cacheKey); - if (entry == null) { - LOG.debug("No cache key {}, we'll read and cache it", cacheKey); - } else { - offset += entry.getOnDiskSizeWithHeader(); - LOG.debug( - "Found cache key {}. 
Skipping prefetch, the block is already cached.", - cacheKey); - continue; - } - } else { - LOG.debug("No entry in the backing map for cache key {}", cacheKey); - } + // Some cache implementations can be persistent and resilient to restarts, + // so we check first if the block exists on its in-memory index, if so, we just + // update the offset and move on to the next block without actually going read all + // the way to the cache. + BlockCacheKey cacheKey = new BlockCacheKey(name, offset); + if (cache.isAlreadyCached(cacheKey).orElse(false)) { + // Right now, isAlreadyCached is only supported by BucketCache, which should + // always cache data blocks. + int size = cache.getBlockSize(cacheKey).orElse(0); + if (size > 0) { + offset += size; + LOG.debug("Found block of size {} for cache key {}. " + + "Skipping prefetch, the block is already cached.", size, cacheKey); + blockCount++; + dataBlockCount++; + continue; + } else { + LOG.debug("Found block for cache key {}, but couldn't get its size. " + + "Maybe the cache implementation doesn't support it? " + + "We'll need to read the block from cache or file system. ", cacheKey); } + } else { + LOG.debug("No entry in the backing map for cache key {}. ", cacheKey); } // Perhaps we got our block from cache? Unlikely as this may be, if it happens, then // the internal-to-hfileblock thread local which holds the overread that gets the @@ -106,8 +106,20 @@ public void run() { HFileBlock block = prefetchStreamReader.readBlock(offset, onDiskSizeOfNextBlock, /* cacheBlock= */true, /* pread= */false, false, false, null, null, true); try { + if (!cacheConf.isInMemory() && !cache.blockFitsIntoTheCache(block).orElse(true)) { + LOG.warn( + "Interrupting prefetch for file {} because block {} of size {} " + + "doesn't fit in the available cache space.", + path, cacheKey, block.getOnDiskSizeWithHeader()); + interrupted = true; + break; + } onDiskSizeOfNextBlock = block.getNextBlockOnDiskSize(); offset += block.getOnDiskSizeWithHeader(); + blockCount++; + if (block.getBlockType().isData()) { + dataBlockCount++; + } } finally { // Ideally here the readBlock won't find the block in cache. We call this // readBlock so that block data is read from FS and cached in BC. we must call @@ -115,8 +127,10 @@ public void run() { block.release(); } } - final long fileSize = offset; - bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path, fileSize)); + if (!interrupted) { + cacheConf.getBlockCache().get().notifyFileCachingCompleted(path, blockCount, + dataBlockCount, offset); + } } catch (IOException e) { // IOExceptions are probably due to region closes (relocation, etc.) 
if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index f31d202782fa..9d431428f376 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1360,9 +1360,9 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo // Don't need the unpacked block back and we're storing the block in the cache compressed if (cacheOnly && cacheCompressed && cacheOnRead) { - LOG.debug("Skipping decompression of block {} in prefetch", cacheKey); - // Cache the block if necessary cacheConf.getBlockCache().ifPresent(cache -> { + LOG.debug("Skipping decompression of block {} in prefetch", cacheKey); + // Cache the block if necessary if (cacheable && cacheConf.shouldCacheBlockOnRead(category)) { cache.cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory(), cacheOnly); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index f321d034bc6b..ba33d5e02c48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -51,6 +51,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.Function; +import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -1328,7 +1329,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { } assert !cacheEnabled; - try (FileInputStream in = deleteFileOnClose(persistenceFile)) { + try (FileInputStream in = new FileInputStream(persistenceFile)) { int pblen = ProtobufMagic.lengthOfPBMagic(); byte[] pbuf = new byte[pblen]; int read = in.read(pbuf); @@ -1992,6 +1993,11 @@ public void clear() { re.getData().release(); } } + + public boolean hasBlocksForFile(String fileName) { + return delegate.keySet().stream().filter(key -> key.getHfileName().equals(fileName)) + .findFirst().isPresent(); + } } public Map getBackingMap() { @@ -2051,4 +2057,91 @@ public void fileCacheCompleted(Path filePath, long size) { regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + fileSize); } + @Override + public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount, + long size) { + // block eviction may be happening in the background as prefetch runs, + // so we need to count all blocks for this file in the backing map under + // a read lock for the block offset + final List locks = new ArrayList<>(); + LOG.debug("Notifying caching completed for file {}, with total blocks {}", fileName, + dataBlockCount); + try { + final MutableInt count = new MutableInt(); + LOG.debug("iterating over {} entries in the backing map", backingMap.size()); + backingMap.entrySet().stream().forEach(entry -> { + if (entry.getKey().getHfileName().equals(fileName.getName())) { + LOG.debug("found block for file {} in the backing map. 
Acquiring read lock for offset {}", + fileName, entry.getKey().getOffset()); + ReentrantReadWriteLock lock = offsetLock.getLock(entry.getKey().getOffset()); + lock.readLock().lock(); + locks.add(lock); + if (backingMap.containsKey(entry.getKey())) { + count.increment(); + } + } + }); + // We may either place only data blocks on the BucketCache or all type of blocks + if (dataBlockCount == count.getValue() || totalBlockCount == count.getValue()) { + LOG.debug("File {} has now been fully cached.", fileName); + fileCacheCompleted(fileName, size); + } else { + LOG.debug( + "Prefetch executor completed for {}, but only {} blocks were cached. " + + "Total blocks for file: {}. Checking for blocks pending cache in cache writer queue.", + fileName, count.getValue(), dataBlockCount); + if (ramCache.hasBlocksForFile(fileName.getName())) { + LOG.debug("There are still blocks pending caching for file {}. Will sleep 100ms " + + "and try the verification again.", fileName); + Thread.sleep(100); + notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); + } else { + LOG.info( + "We found only {} blocks cached from a total of {} for file {}, " + + "but no blocks pending caching. Maybe cache is full?", + count, dataBlockCount, fileName); + } + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } finally { + for (ReentrantReadWriteLock lock : locks) { + lock.readLock().unlock(); + } + } + } + + @Override + public void notifyFileBlockEvicted(String fileName) { + fullyCachedFiles.remove(fileName); + } + + @Override + public Optional blockFitsIntoTheCache(HFileBlock block) { + long currentUsed = bucketAllocator.getUsedSize(); + boolean result = (currentUsed + block.getOnDiskSizeWithHeader()) < acceptableSize(); + return Optional.of(result); + } + + @Override + public Optional shouldCacheFile(String fileName) { + // if we don't have the file in fullyCachedFiles, we should cache it + return Optional.of(!fullyCachedFiles.containsKey(fileName)); + } + + @Override + public Optional isAlreadyCached(BlockCacheKey key) { + return Optional.of(getBackingMap().containsKey(key)); + } + + @Override + public Optional getBlockSize(BlockCacheKey key) { + BucketEntry entry = backingMap.get(key); + if (entry == null) { + return Optional.empty(); + } else { + return Optional.of(entry.getOnDiskSizeWithHeader()); + } + + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index cdf9faf2490b..b58319179c56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -237,7 +237,6 @@ public void testPrefetchCompressed() throws Exception { Path storeFile = writeStoreFile("TestPrefetchCompressed", context); readStoreFileCacheOnly(storeFile); conf.setBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, false); - } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java index 879d8566c82e..a5023d5da004 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchRSClose.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.File; @@ -67,7 +66,7 @@ public class TestPrefetchRSClose { MiniZooKeeperCluster zkCluster; SingleProcessHBaseCluster cluster; StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(2).build(); + StartTestingClusterOption.builder().numRegionServers(1).build(); @Before public void setup() throws Exception { @@ -81,7 +80,6 @@ public void setup() throws Exception { conf.set("hbase.bucketcache.persistent.path", testDir + "/bucket.persistence"); zkCluster = TEST_UTIL.startMiniZKCluster(); cluster = TEST_UTIL.startMiniHBaseCluster(option); - assertEquals(2, cluster.getRegionServerThreads().size()); cluster.setConf(conf); } @@ -117,9 +115,7 @@ public void testPrefetchPersistence() throws Exception { // Default interval for cache persistence is 1000ms. So after 1000ms, both the persistence files // should exist. - HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableName).size() == 1 - ? cluster.getRegionServer(1) - : cluster.getRegionServer(0); + HRegionServer regionServingRS = cluster.getRegionServer(0); Admin admin = TEST_UTIL.getAdmin(); List cachedFilesList = admin.getCachedFilesList(regionServingRS.getServerName()); @@ -133,10 +129,6 @@ public void testPrefetchPersistence() throws Exception { LOG.info("Stopped Region Server 0."); Thread.sleep(1000); assertTrue(new File(testDir + "/bucket.persistence").exists()); - - // Start the RS and validate - cluster.startRegionServer(); - assertFalse(new File(testDir + "/bucket.persistence").exists()); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index 93f09231f740..446298235471 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; +import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -38,12 +39,16 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -72,8 +77,6 @@ public class TestPrefetchWithBucketCache { private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; private static final int DATA_BLOCK_SIZE = 2048; - private static final int NUM_KV = 100; - private Configuration conf; private CacheConfig cacheConf; 
private FileSystem fs; @@ -87,9 +90,6 @@ public void setUp() throws IOException { File testDir = new File(name.getMethodName()); testDir.mkdir(); conf.set(BUCKET_CACHE_IOENGINE_KEY, "file:/" + testDir.getAbsolutePath() + "/bucket.cache"); - conf.setLong(BUCKET_CACHE_SIZE_KEY, 200); - blockCache = BlockCacheFactory.createBlockCache(conf); - cacheConf = new CacheConfig(conf, blockCache); } @After @@ -102,7 +102,10 @@ public void tearDown() { @Test public void testPrefetchDoesntOverwork() throws Exception { - Path storeFile = writeStoreFile("TestPrefetchDoesntOverwork"); + conf.setLong(BUCKET_CACHE_SIZE_KEY, 200); + blockCache = BlockCacheFactory.createBlockCache(conf); + cacheConf = new CacheConfig(conf, blockCache); + Path storeFile = writeStoreFile("TestPrefetchDoesntOverwork", 100); // Prefetches the file blocks LOG.debug("First read should prefetch the blocks."); readStoreFile(storeFile); @@ -123,7 +126,7 @@ public void testPrefetchDoesntOverwork() throws Exception { BlockCacheKey key = snapshot.keySet().stream().findFirst().get(); LOG.debug("removing block {}", key); bc.getBackingMap().remove(key); - bc.getFullyCachedFiles().ifPresent(fcf -> fcf.remove(storeFile.getName())); + bc.getFullyCachedFiles().get().remove(storeFile.getName()); assertTrue(snapshot.size() > bc.getBackingMap().size()); LOG.debug("Third read should prefetch again, as we removed one block for the file."); readStoreFile(storeFile); @@ -131,6 +134,57 @@ public void testPrefetchDoesntOverwork() throws Exception { assertTrue(snapshot.get(key).getCachedTime() < bc.getBackingMap().get(key).getCachedTime()); } + @Test + public void testPrefetchInterruptOnCapacity() throws Exception { + conf.setLong(BUCKET_CACHE_SIZE_KEY, 1); + conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072"); + conf.setDouble("hbase.bucketcache.acceptfactor", 0.98); + conf.setDouble("hbase.bucketcache.minfactor", 0.95); + conf.setDouble("hbase.bucketcache.extrafreefactor", 0.01); + blockCache = BlockCacheFactory.createBlockCache(conf); + cacheConf = new CacheConfig(conf, blockCache); + Path storeFile = writeStoreFile("testPrefetchInterruptOnCapacity", 10000); + // Prefetches the file blocks + LOG.debug("First read should prefetch the blocks."); + createReaderAndWaitForPrefetchInterruption(storeFile); + BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get(); + long evictionsFirstPrefetch = bc.getStats().getEvictionCount(); + LOG.debug("evictions after first prefetch: {}", bc.getStats().getEvictionCount()); + HFile.Reader reader = createReaderAndWaitForPrefetchInterruption(storeFile); + LOG.debug("evictions after second prefetch: {}", bc.getStats().getEvictionCount()); + assertTrue((bc.getStats().getEvictionCount() - evictionsFirstPrefetch) < 10); + HFileScanner scanner = reader.getScanner(conf, true, true); + scanner.seekTo(); + while (scanner.next()) { + // do a full scan to force some evictions + LOG.trace("Iterating the full scan to evict some blocks"); + } + scanner.close(); + LOG.debug("evictions after scanner: {}", bc.getStats().getEvictionCount()); + // The scanner should had triggered at least 3x evictions from the prefetch, + // as we try cache each block without interruption. 
+ assertTrue(bc.getStats().getEvictionCount() > evictionsFirstPrefetch); + } + + @Test + public void testPrefetchDoesntInterruptInMemoryOnCapacity() throws Exception { + conf.setLong(BUCKET_CACHE_SIZE_KEY, 1); + conf.set(BUCKET_CACHE_BUCKETS_KEY, "3072"); + conf.setDouble("hbase.bucketcache.acceptfactor", 0.98); + conf.setDouble("hbase.bucketcache.minfactor", 0.95); + conf.setDouble("hbase.bucketcache.extrafreefactor", 0.01); + blockCache = BlockCacheFactory.createBlockCache(conf); + ColumnFamilyDescriptor family = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).setInMemory(true).build(); + cacheConf = new CacheConfig(conf, family, blockCache, ByteBuffAllocator.HEAP); + Path storeFile = writeStoreFile("testPrefetchDoesntInterruptInMemoryOnCapacity", 10000); + // Prefetches the file blocks + LOG.debug("First read should prefetch the blocks."); + createReaderAndWaitForPrefetchInterruption(storeFile); + BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get(); + assertTrue(bc.getStats().getEvictedCount() > 200); + } + private void readStoreFile(Path storeFilePath) throws Exception { readStoreFile(storeFilePath, (r, o) -> { HFileBlock block = null; @@ -170,18 +224,33 @@ private void readStoreFile(Path storeFilePath, } } - private Path writeStoreFile(String fname) throws IOException { + private HFile.Reader createReaderAndWaitForPrefetchInterruption(Path storeFilePath) + throws Exception { + // Open the file + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); + + while (!reader.prefetchComplete()) { + // Sleep for a bit + Thread.sleep(1000); + } + assertEquals(0, BucketCache.getBucketCacheFromCacheConfig(cacheConf).get().getFullyCachedFiles() + .get().size()); + + return reader; + } + + private Path writeStoreFile(String fname, int numKVs) throws IOException { HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); - return writeStoreFile(fname, meta); + return writeStoreFile(fname, meta, numKVs); } - private Path writeStoreFile(String fname, HFileContext context) throws IOException { + private Path writeStoreFile(String fname, HFileContext context, int numKVs) throws IOException { Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname); StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir).withFileContext(context).build(); Random rand = ThreadLocalRandom.current(); final int rowLen = 32; - for (int i = 0; i < NUM_KV; ++i) { + for (int i = 0; i < numKVs; ++i) { byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] v = RandomKeyValueUtil.randomValue(rand); int cfLen = rand.nextInt(k.length - rowLen + 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 0cbafedc7c53..6a9b5bf382a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -363,7 +363,6 @@ private void testRetrievalUtils(Path testDir, String ioEngineName) assertTrue(new File(persistencePath).exists()); bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); assertEquals(usedSize, bucketCache.getAllocator().getUsedSize()); } finally { if 
(bucketCache != null) { @@ -820,7 +819,6 @@ public void testFreeBucketEntryRestoredFromFile() throws Exception { // restore cache from file bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); assertEquals(usedByteSize, bucketCache.getAllocator().getUsedSize()); for (HFileBlockPair hfileBlockPair : hfileBlockPairs) { @@ -877,7 +875,6 @@ public void testBlockAdditionWaitWhenCache() throws Exception { // restore cache from file bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath); - assertFalse(new File(persistencePath).exists()); assertEquals(usedByteSize, bucketCache.getAllocator().getUsedSize()); for (HFileBlockPair hfileBlockPair : hfileBlockPairs) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index bd69f28e1eac..f6d3efa9015d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -22,6 +22,8 @@ import java.io.File; import java.io.IOException; +import java.util.Iterator; +import java.util.Map; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; @@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.io.hfile.PrefetchExecutor; import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -131,18 +134,50 @@ public void testPrefetchPersistenceCrashNegative() throws Exception { @Test public void testPrefetchListUponBlockEviction() throws Exception { Configuration conf = setupBucketCacheConfig(200); - BucketCache bucketCache1 = setupBucketCache(conf); - CacheConfig cacheConf = new CacheConfig(conf, bucketCache1); + BucketCache bucketCache = setupBucketCache(conf); + CacheConfig cacheConf = new CacheConfig(conf, bucketCache); + FileSystem fs = HFileSystem.get(conf); + // Load Blocks in cache + Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs); + readStoreFile(storeFile, 0, fs, cacheConf, conf, bucketCache); + int retries = 0; + while (!bucketCache.fullyCachedFiles.containsKey(storeFile.getName()) && retries < 5) { + Thread.sleep(500); + retries++; + } + assertTrue(retries < 5); + BlockCacheKey bucketCacheKey = bucketCache.backingMap.entrySet().iterator().next().getKey(); + // Evict Blocks from cache + bucketCache.evictBlock(bucketCacheKey); + assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName())); + cleanupBucketCache(bucketCache); + } + + @Test + public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception { + Configuration conf = setupBucketCacheConfig(200); + BucketCache bucketCache = setupBucketCache(conf); + CacheConfig cacheConf = new CacheConfig(conf, bucketCache); FileSystem fs = HFileSystem.get(conf); // Load Blocks in cache Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs); - readStoreFile(storeFile, 0, fs, 
cacheConf, conf, bucketCache1); - Thread.sleep(500); + HFile.createReader(fs, storeFile, cacheConf, true, conf); + while (bucketCache.backingMap.size() == 0) { + Thread.sleep(10); + } + Iterator> it = + bucketCache.backingMap.entrySet().iterator(); // Evict Blocks from cache - assertTrue(bucketCache1.fullyCachedFiles.containsKey(storeFile.getName())); - BlockCacheKey bucketCacheKey = bucketCache1.backingMap.entrySet().iterator().next().getKey(); - bucketCache1.evictBlock(bucketCacheKey); - assertFalse(bucketCache1.fullyCachedFiles.containsKey(storeFile.getName())); + bucketCache.evictBlock(it.next().getKey()); + bucketCache.evictBlock(it.next().getKey()); + int retries = 0; + while (!PrefetchExecutor.isCompleted(storeFile) && retries < 5) { + Thread.sleep(500); + retries++; + } + assertTrue(retries < 5); + assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName())); + cleanupBucketCache(bucketCache); } public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheConfig cacheConf, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java index f15874bc61c2..035cdc3f887e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java @@ -110,7 +110,6 @@ public void setup() throws IOException { @Test public void testPrefetchPersistence() throws Exception { - bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", 60 * 1000, conf); @@ -133,8 +132,6 @@ public void testPrefetchPersistence() throws Exception { constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", 60 * 1000, conf); cacheConf = new CacheConfig(conf, bucketCache); - assertFalse(new File(testDir + "/bucket.persistence").exists()); - assertFalse(new File(testDir + "/prefetch.persistence").exists()); assertTrue(usedSize != 0); readStoreFile(storeFile, 0); readStoreFile(storeFile2, 0); From dba900f778614b392e5e83522cdda72487558f48 Mon Sep 17 00:00:00 2001 From: hiping-tech <58875741+hiping-tech@users.noreply.github.com> Date: Fri, 24 Nov 2023 10:29:34 +0800 Subject: [PATCH 150/514] HBASE-28218 Add a check for getQueueStorage().hasData() in the getDeletableFiles method of ReplicationLogCleaner (#5536) Co-authored-by: lvhaiping.lhp Signed-off-by: Duo Zhang --- .../hbase/replication/master/ReplicationLogCleaner.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 6ebcac7e453a..7fc8feae72ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -192,6 +192,14 @@ public Iterable getDeletableFiles(Iterable files) { if (this.getConf() == null) { return files; } + try { + if (!rpm.getQueueStorage().hasData()) { + return files; + } + } catch (ReplicationException e) { + LOG.error("Error occurred while executing queueStorage.hasData()", e); + return Collections.emptyList(); + } if 
(!canFilter) { // We can not delete anything if there are AddPeerProcedure running at the same time // See HBASE-27214 for more details. From 6f8b288c4254e407a2969e8cfd06468144150259 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 28 Nov 2023 18:28:34 +0800 Subject: [PATCH 151/514] HBASE-28031 TestClusterScopeQuotaThrottle is still failing with broken WAL writer (#5539) Limit the scope for EnvironmentEdge injection Signed-off-by: Guanghao Zhang --- .../EnvironmentEdgeManagerTestHelper.java | 32 +++++++++++++++++++ .../quotas/TestClusterScopeQuotaThrottle.java | 3 -- .../hbase/quotas/ThrottleQuotaTestUtil.java | 4 ++- 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java index 684247248dc6..73e7f1623ef8 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java @@ -33,4 +33,36 @@ public static void reset() { public static void injectEdge(EnvironmentEdge edge) { EnvironmentEdgeManager.injectEdge(edge); } + + private static final class PackageEnvironmentEdgeWrapper implements EnvironmentEdge { + + private final EnvironmentEdge delegate; + + private final String packageName; + + PackageEnvironmentEdgeWrapper(EnvironmentEdge delegate, String packageName) { + this.delegate = delegate; + this.packageName = packageName; + } + + @Override + public long currentTime() { + StackTraceElement[] elements = new Exception().getStackTrace(); + // the first element is us, the second one is EnvironmentEdgeManager, so let's check the third + // one + if (elements.length > 2 && elements[2].getClassName().startsWith(packageName)) { + return delegate.currentTime(); + } else { + return System.currentTimeMillis(); + } + } + } + + /** + * Inject a {@link EnvironmentEdge} which only takes effect when calling directly from the classes + * in the given package. 
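* For example, {@code injectEdgeForPackage(edge, ThrottleQuotaTestUtil.class.getPackage().getName())} makes the injected clock visible only to callers in the quotas test package, while all other callers keep seeing {@code System.currentTimeMillis()}.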
+ */ + public static void injectEdgeForPackage(EnvironmentEdge edge, String packageName) { + injectEdge(new PackageEnvironmentEdgeWrapper(edge, packageName)); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java index c617c34800f7..b34f722e2e78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java @@ -75,9 +75,6 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); - // disable stream slow monitor check, as in this test we inject our own EnvironmentEdge - TEST_UTIL.getConfiguration().setInt("hbase.regionserver.async.wal.min.slow.detect.count", - Integer.MAX_VALUE); TEST_UTIL.startMiniCluster(2); TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); QuotaCache.TEST_FORCE_REFRESH = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java index 93eae8dfccf6..a6e93b663c04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java @@ -42,7 +42,9 @@ public final class ThrottleQuotaTestUtil { private final static int REFRESH_TIME = 30 * 60000; static { envEdge.setValue(EnvironmentEdgeManager.currentTime()); - EnvironmentEdgeManagerTestHelper.injectEdge(envEdge); + // only active the envEdge for quotas package + EnvironmentEdgeManagerTestHelper.injectEdgeForPackage(envEdge, + ThrottleQuotaTestUtil.class.getPackage().getName()); } private ThrottleQuotaTestUtil() { From dbfb516a557fc9358a68db714fe0933b33ba04f5 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 28 Nov 2023 13:20:15 +0000 Subject: [PATCH 152/514] HBASE-28217 PrefetchExecutor should not run for files from CFs that have disabled BLOCKCACHE (#5535) Signed-off-by: Peter Somogyi --- .../hadoop/hbase/io/hfile/CacheConfig.java | 2 +- .../hbase/io/hfile/PrefetchExecutor.java | 5 ++ .../hadoop/hbase/io/hfile/TestPrefetch.java | 52 ++++++++++++++++++- 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 57f91fa19f44..4587eced6163 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -343,7 +343,7 @@ public boolean shouldCacheCompressed(BlockCategory category) { /** Returns true if blocks should be prefetched into the cache on open, false if not */ public boolean shouldPrefetchOnOpen() { - return this.prefetchOnOpen; + return this.prefetchOnOpen && this.cacheDataOnRead; } /** Returns true if blocks should be cached while writing during compaction, false if not */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 
02fbc12e85c7..4ae19193c8a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -129,4 +129,9 @@ public static boolean isCompleted(Path path) { private PrefetchExecutor() { } + + /* Visible for testing only */ + static ScheduledExecutorService getExecutorPool() { + return prefetchExecutorPool; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index b58319179c56..0b45a930dceb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.not; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -35,6 +36,7 @@ import java.io.IOException; import java.util.List; import java.util.Random; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; @@ -120,6 +122,40 @@ public void testPrefetchSetInHCDWorks() { assertTrue(cc.shouldPrefetchOnOpen()); } + @Test + public void testPrefetchBlockCacheDisabled() throws Exception { + ScheduledThreadPoolExecutor poolExecutor = + (ScheduledThreadPoolExecutor) PrefetchExecutor.getExecutorPool(); + long totalCompletedBefore = poolExecutor.getCompletedTaskCount(); + long queueBefore = poolExecutor.getQueue().size(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).setPrefetchBlocksOnOpen(true) + .setBlockCacheEnabled(false).build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); + CacheConfig cacheConfig = + new CacheConfig(conf, columnFamilyDescriptor, blockCache, ByteBuffAllocator.HEAP); + Path storeFile = writeStoreFile("testPrefetchBlockCacheDisabled", meta, cacheConfig); + readStoreFile(storeFile, (r, o) -> { + HFileBlock block = null; + try { + block = r.readBlock(o, -1, false, true, false, true, null, null); + } catch (IOException e) { + fail(e.getMessage()); + } + return block; + }, (key, block) -> { + boolean isCached = blockCache.getBlock(key, true, false, true) != null; + if ( + block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX + || block.getBlockType() == BlockType.INTERMEDIATE_INDEX + ) { + assertFalse(isCached); + } + }, cacheConfig); + assertEquals(totalCompletedBefore + queueBefore, + poolExecutor.getCompletedTaskCount() + poolExecutor.getQueue().size()); + } + @Test public void testPrefetch() throws Exception { TraceUtil.trace(() -> { @@ -212,8 +248,15 @@ private void readStoreFileCacheOnly(Path storeFilePath) throws Exception { private void readStoreFile(Path storeFilePath, BiFunction readFunction, BiConsumer validationFunction) throws Exception { + readStoreFile(storeFilePath, readFunction, validationFunction, cacheConf); + } + + private void readStoreFile(Path storeFilePath, + BiFunction readFunction, + BiConsumer validationFunction, CacheConfig cacheConfig) + throws Exception { // Open the file - HFile.Reader reader = HFile.createReader(fs, storeFilePath, 
cacheConf, true, conf); + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf); while (!reader.prefetchComplete()) { // Sleep for a bit @@ -350,8 +393,13 @@ private Path writeStoreFile(String fname) throws IOException { } private Path writeStoreFile(String fname, HFileContext context) throws IOException { + return writeStoreFile(fname, context, cacheConf); + } + + private Path writeStoreFile(String fname, HFileContext context, CacheConfig cacheConfig) + throws IOException { Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname); - StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConfig, fs) .withOutputDir(storeFileParentDir).withFileContext(context).build(); Random rand = ThreadLocalRandom.current(); final int rowLen = 32; From 4b015e6a5486394d70bbf5fc0197e469c0987913 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 29 Nov 2023 10:59:38 +0800 Subject: [PATCH 153/514] HBASE-28212 Do not need to maintain rollback step when root procedure does not support rollback (#5538) Signed-off-by: GeorryHuang --- .../hadoop/hbase/procedure2/Procedure.java | 22 +- .../hbase/procedure2/ProcedureExecutor.java | 346 ++++++++++++------ .../hbase/procedure2/ProcedureUtil.java | 5 + .../hbase/procedure2/RootProcedureState.java | 52 ++- .../procedure2/StateMachineProcedure.java | 5 + .../procedure2/ProcedureTestingUtility.java | 37 +- .../procedure2/TestProcedureRecovery.java | 16 +- .../procedure2/TestStateMachineProcedure.java | 6 + .../hbase/procedure2/TestYieldProcedures.java | 6 + .../store/wal/TestWALProcedureStore.java | 10 +- .../src/main/protobuf/server/Procedure.proto | 4 + .../master/assignment/TestRegionBypass.java | 36 +- .../master/assignment/TestRollbackSCP.java | 186 ++++++++++ 13 files changed, 578 insertions(+), 153 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 43adba2bc21a..7bd64fd9944d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -133,6 +133,9 @@ public enum LockState { private RemoteProcedureException exception = null; private int[] stackIndexes = null; private int childrenLatch = 0; + // since we do not always maintain stackIndexes if the root procedure does not support rollback, + // we need a separated flag to indicate whether a procedure was executed + private boolean wasExecuted; private volatile int timeout = NO_TIMEOUT; private volatile long lastUpdate; @@ -870,6 +873,7 @@ protected synchronized void addStackIndex(final int index) { stackIndexes = Arrays.copyOf(stackIndexes, count + 1); stackIndexes[count] = index; } + wasExecuted = true; } protected synchronized boolean removeStackIndex() { @@ -890,16 +894,32 @@ protected synchronized void setStackIndexes(final List stackIndexes) { for (int i = 0; i < this.stackIndexes.length; ++i) { this.stackIndexes[i] = stackIndexes.get(i); } + // for backward compatible, where a procedure is serialized before we added the executed flag, + // the flag will be false so we need to set the wasExecuted flag here + this.wasExecuted = true; + } + + protected synchronized void setExecuted() { + this.wasExecuted = true; } protected 
synchronized boolean wasExecuted() { - return stackIndexes != null; + return wasExecuted; } protected synchronized int[] getStackIndexes() { return stackIndexes; } + /** + * Return whether the procedure supports rollback. If the procedure does not support rollback, we + * can skip the rollback state management which could increase the performance. See HBASE-28210 + * and HBASE-28212. + */ + protected boolean isRollbackSupported() { + return true; + } + // ========================================================================== // Internal methods - called by the ProcedureExecutor // ========================================================================== diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 46ce065b8778..3099c64e00f6 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -24,9 +24,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Comparator; import java.util.Deque; import java.util.HashSet; import java.util.List; +import java.util.PriorityQueue; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -117,6 +119,9 @@ public static class Testing { protected volatile boolean killAfterStoreUpdate = false; protected volatile boolean toggleKillAfterStoreUpdate = false; + protected volatile boolean killBeforeStoreUpdateInRollback = false; + protected volatile boolean toggleKillBeforeStoreUpdateInRollback = false; + protected boolean shouldKillBeforeStoreUpdate() { final boolean kill = this.killBeforeStoreUpdate; if (this.toggleKillBeforeStoreUpdate) { @@ -148,6 +153,16 @@ protected boolean shouldKillAfterStoreUpdate() { protected boolean shouldKillAfterStoreUpdate(final boolean isSuspended) { return (isSuspended && !killIfSuspended) ? false : shouldKillAfterStoreUpdate(); } + + protected boolean shouldKillBeforeStoreUpdateInRollback() { + final boolean kill = this.killBeforeStoreUpdateInRollback; + if (this.toggleKillBeforeStoreUpdateInRollback) { + this.killBeforeStoreUpdateInRollback = !kill; + LOG.warn("Toggle KILL before store update in rollback to: " + + this.killBeforeStoreUpdateInRollback); + } + return kill; + } } public interface ProcedureExecutorListener { @@ -394,68 +409,10 @@ private void restoreLocks() { }); } - private void loadProcedures(ProcedureIterator procIter) throws IOException { - // 1. 
Build the rollback stack - int runnableCount = 0; - int failedCount = 0; - int waitingCount = 0; - int waitingTimeoutCount = 0; - while (procIter.hasNext()) { - boolean finished = procIter.isNextFinished(); - @SuppressWarnings("unchecked") - Procedure proc = procIter.next(); - NonceKey nonceKey = proc.getNonceKey(); - long procId = proc.getProcId(); - - if (finished) { - completed.put(proc.getProcId(), new CompletedProcedureRetainer<>(proc)); - LOG.debug("Completed {}", proc); - } else { - if (!proc.hasParent()) { - assert !proc.isFinished() : "unexpected finished procedure"; - rollbackStack.put(proc.getProcId(), new RootProcedureState<>()); - } - - // add the procedure to the map - proc.beforeReplay(getEnvironment()); - procedures.put(proc.getProcId(), proc); - switch (proc.getState()) { - case RUNNABLE: - runnableCount++; - break; - case FAILED: - failedCount++; - break; - case WAITING: - waitingCount++; - break; - case WAITING_TIMEOUT: - waitingTimeoutCount++; - break; - default: - break; - } - } - - if (nonceKey != null) { - nonceKeysToProcIdsMap.put(nonceKey, procId); // add the nonce to the map - } - } - - // 2. Initialize the stacks: In the old implementation, for procedures in FAILED state, we will - // push it into the ProcedureScheduler directly to execute the rollback. But this does not work - // after we introduce the restore lock stage. For now, when we acquire a xlock, we will remove - // the queue from runQueue in scheduler, and then when a procedure which has lock access, for - // example, a sub procedure of the procedure which has the xlock, is pushed into the scheduler, - // we will add the queue back to let the workers poll from it. The assumption here is that, the - // procedure which has the xlock should have been polled out already, so when loading we can not - // add the procedure to scheduler first and then call acquireLock, since the procedure is still - // in the queue, and since we will remove the queue from runQueue, then no one can poll it out, - // then there is a dead lock - List> runnableList = new ArrayList<>(runnableCount); - List> failedList = new ArrayList<>(failedCount); - List> waitingList = new ArrayList<>(waitingCount); - List> waitingTimeoutList = new ArrayList<>(waitingTimeoutCount); + private void initializeStacks(ProcedureIterator procIter, + List> runnableList, List> failedList, + List> waitingList, List> waitingTimeoutList) + throws IOException { procIter.reset(); while (procIter.hasNext()) { if (procIter.isNextFinished()) { @@ -504,8 +461,19 @@ private void loadProcedures(ProcedureIterator procIter) throws IOException { break; } } + rollbackStack.forEach((rootProcId, procStack) -> { + if (procStack.getSubproceduresStack() != null) { + // if we have already record some stack ids, it means we support rollback + procStack.setRollbackSupported(true); + } else { + // otherwise, test the root procedure to see if we support rollback + procStack.setRollbackSupported(procedures.get(rootProcId).isRollbackSupported()); + } + }); + } - // 3. Check the waiting procedures to see if some of them can be added to runnable. + private void processWaitingProcedures(List> waitingList, + List> runnableList) { waitingList.forEach(proc -> { if (!proc.hasChildren()) { // Normally, WAITING procedures should be waken by its children. But, there is a case that, @@ -522,16 +490,17 @@ private void loadProcedures(ProcedureIterator procIter) throws IOException { proc.afterReplay(getEnvironment()); } }); - // 4. restore locks - restoreLocks(); + } - // 5. 
Push the procedures to the timeout executor + private void processWaitingTimeoutProcedures(List> waitingTimeoutList) { waitingTimeoutList.forEach(proc -> { proc.afterReplay(getEnvironment()); timeoutExecutor.add(proc); }); + } - // 6. Push the procedure to the scheduler + private void pushProceduresAfterLoad(List> runnableList, + List> failedList) { failedList.forEach(scheduler::addBack); runnableList.forEach(p -> { p.afterReplay(getEnvironment()); @@ -540,6 +509,84 @@ private void loadProcedures(ProcedureIterator procIter) throws IOException { } scheduler.addBack(p); }); + } + + private void loadProcedures(ProcedureIterator procIter) throws IOException { + // 1. Build the rollback stack + int runnableCount = 0; + int failedCount = 0; + int waitingCount = 0; + int waitingTimeoutCount = 0; + while (procIter.hasNext()) { + boolean finished = procIter.isNextFinished(); + @SuppressWarnings("unchecked") + Procedure proc = procIter.next(); + NonceKey nonceKey = proc.getNonceKey(); + long procId = proc.getProcId(); + + if (finished) { + completed.put(proc.getProcId(), new CompletedProcedureRetainer<>(proc)); + LOG.debug("Completed {}", proc); + } else { + if (!proc.hasParent()) { + assert !proc.isFinished() : "unexpected finished procedure"; + rollbackStack.put(proc.getProcId(), new RootProcedureState<>()); + } + + // add the procedure to the map + proc.beforeReplay(getEnvironment()); + procedures.put(proc.getProcId(), proc); + switch (proc.getState()) { + case RUNNABLE: + runnableCount++; + break; + case FAILED: + failedCount++; + break; + case WAITING: + waitingCount++; + break; + case WAITING_TIMEOUT: + waitingTimeoutCount++; + break; + default: + break; + } + } + + if (nonceKey != null) { + nonceKeysToProcIdsMap.put(nonceKey, procId); // add the nonce to the map + } + } + + // 2. Initialize the stacks: In the old implementation, for procedures in FAILED state, we will + // push it into the ProcedureScheduler directly to execute the rollback. But this does not work + // after we introduce the restore lock stage. For now, when we acquire a xlock, we will remove + // the queue from runQueue in scheduler, and then when a procedure which has lock access, for + // example, a sub procedure of the procedure which has the xlock, is pushed into the scheduler, + // we will add the queue back to let the workers poll from it. The assumption here is that, the + // procedure which has the xlock should have been polled out already, so when loading we can not + // add the procedure to scheduler first and then call acquireLock, since the procedure is still + // in the queue, and since we will remove the queue from runQueue, then no one can poll it out, + // then there is a dead lock + List> runnableList = new ArrayList<>(runnableCount); + List> failedList = new ArrayList<>(failedCount); + List> waitingList = new ArrayList<>(waitingCount); + List> waitingTimeoutList = new ArrayList<>(waitingTimeoutCount); + + initializeStacks(procIter, runnableList, failedList, waitingList, waitingTimeoutList); + + // 3. Check the waiting procedures to see if some of them can be added to runnable. + processWaitingProcedures(waitingList, runnableList); + + // 4. restore locks + restoreLocks(); + + // 5. Push the procedures to the timeout executor + processWaitingTimeoutProcedures(waitingTimeoutList); + + // 6. Push the procedure to the scheduler + pushProceduresAfterLoad(runnableList, failedList); // After all procedures put into the queue, signal the worker threads. // Otherwise, there is a race condition. See HBASE-21364. 
scheduler.signalAll(); @@ -1080,6 +1127,7 @@ private long pushProcedure(Procedure proc) { // Create the rollback stack for the procedure RootProcedureState stack = new RootProcedureState<>(); + stack.setRollbackSupported(proc.isRollbackSupported()); rollbackStack.put(currentProcId, stack); // Submit the new subprocedures @@ -1441,42 +1489,75 @@ private void releaseLock(Procedure proc, boolean force) { } } - /** - * Execute the rollback of the full procedure stack. Once the procedure is rolledback, the - * root-procedure will be visible as finished to user, and the result will be the fatal exception. - */ - private LockState executeRollback(long rootProcId, RootProcedureState procStack) { - Procedure rootProc = procedures.get(rootProcId); - RemoteProcedureException exception = rootProc.getException(); - // TODO: This needs doc. The root proc doesn't have an exception. Maybe we are - // rolling back because the subprocedure does. Clarify. - if (exception == null) { - exception = procStack.getException(); - rootProc.setFailure(exception); - store.update(rootProc); + // Returning null means we have already held the execution lock, so you do not need to get the + // lock entry for releasing + private IdLock.Entry getLockEntryForRollback(long procId) { + // Hold the execution lock if it is not held by us. The IdLock is not reentrant so we need + // this check, as the worker will hold the lock before executing a procedure. This is the only + // place where we may hold two procedure execution locks, and there is a fence in the + // RootProcedureState where we can make sure that only one worker can execute the rollback of + // a RootProcedureState, so there is no dead lock problem. And the lock here is necessary to + // prevent race between us and the force update thread. 
+ if (!procExecutionLock.isHeldByCurrentThread(procId)) { + try { + return procExecutionLock.getLockEntry(procId); + } catch (IOException e) { + // can only happen if interrupted, so not a big deal to propagate it + throw new UncheckedIOException(e); + } + } + return null; + } + + private void executeUnexpectedRollback(Procedure rootProc, + RootProcedureState procStack) { + if (procStack.getSubprocs() != null) { + // comparing proc id in reverse order, so we will delete later procedures first, otherwise we + // may delete parent procedure first and if we fail in the middle of this operation, when + // loading we will find some orphan procedures + PriorityQueue> pq = + new PriorityQueue<>(procStack.getSubprocs().size(), + Comparator.> comparingLong(Procedure::getProcId).reversed()); + pq.addAll(procStack.getSubprocs()); + for (;;) { + Procedure subproc = pq.poll(); + if (subproc == null) { + break; + } + if (!procedures.containsKey(subproc.getProcId())) { + // this means it has already been rolledback + continue; + } + IdLock.Entry lockEntry = getLockEntryForRollback(subproc.getProcId()); + try { + cleanupAfterRollbackOneStep(subproc); + execCompletionCleanup(subproc); + } finally { + if (lockEntry != null) { + procExecutionLock.releaseLockEntry(lockEntry); + } + } + } + } + IdLock.Entry lockEntry = getLockEntryForRollback(rootProc.getProcId()); + try { + cleanupAfterRollbackOneStep(rootProc); + } finally { + if (lockEntry != null) { + procExecutionLock.releaseLockEntry(lockEntry); + } } + } + private LockState executeNormalRollback(Procedure rootProc, + RootProcedureState procStack) { List> subprocStack = procStack.getSubproceduresStack(); assert subprocStack != null : "Called rollback with no steps executed rootProc=" + rootProc; int stackTail = subprocStack.size(); while (stackTail-- > 0) { Procedure proc = subprocStack.get(stackTail); - IdLock.Entry lockEntry = null; - // Hold the execution lock if it is not held by us. The IdLock is not reentrant so we need - // this check, as the worker will hold the lock before executing a procedure. This is the only - // place where we may hold two procedure execution locks, and there is a fence in the - // RootProcedureState where we can make sure that only one worker can execute the rollback of - // a RootProcedureState, so there is no dead lock problem. And the lock here is necessary to - // prevent race between us and the force update thread. - if (!procExecutionLock.isHeldByCurrentThread(proc.getProcId())) { - try { - lockEntry = procExecutionLock.getLockEntry(proc.getProcId()); - } catch (IOException e) { - // can only happen if interrupted, so not a big deal to propagate it - throw new UncheckedIOException(e); - } - } + IdLock.Entry lockEntry = getLockEntryForRollback(proc.getProcId()); try { // For the sub procedures which are successfully finished, we do not rollback them. // Typically, if we want to rollback a procedure, we first need to rollback it, and then @@ -1526,15 +1607,59 @@ private LockState executeRollback(long rootProcId, RootProcedureState procStack) { + Procedure rootProc = procedures.get(rootProcId); + RemoteProcedureException exception = rootProc.getException(); + // TODO: This needs doc. The root proc doesn't have an exception. Maybe we are + // rolling back because the subprocedure does. Clarify. 
+ if (exception == null) { + exception = procStack.getException(); + rootProc.setFailure(exception); + store.update(rootProc); + } + + if (procStack.isRollbackSupported()) { + LockState lockState = executeNormalRollback(rootProc, procStack); + if (lockState != LockState.LOCK_ACQUIRED) { + return lockState; + } + } else { + // the procedure does not support rollback, so typically we should not reach here, this + // usually means there are code bugs, let's just wait all the subprocedures to finish and then + // mark the root procedure as failure. + LOG.error(HBaseMarkers.FATAL, + "Root Procedure {} does not support rollback but the execution failed" + + " and try to rollback, code bug?", + rootProc, exception); + executeUnexpectedRollback(rootProc, procStack); + } + + IdLock.Entry lockEntry = getLockEntryForRollback(rootProc.getProcId()); + try { + // Finalize the procedure state + LOG.info("Rolled back {} exec-time={}", rootProc, + StringUtils.humanTimeDiff(rootProc.elapsedTime())); + procedureFinished(rootProc); + } finally { + if (lockEntry != null) { + procExecutionLock.releaseLockEntry(lockEntry); + } + } - // Finalize the procedure state - LOG.info("Rolled back {} exec-time={}", rootProc, - StringUtils.humanTimeDiff(rootProc.elapsedTime())); - procedureFinished(rootProc); return LockState.LOCK_ACQUIRED; } private void cleanupAfterRollbackOneStep(Procedure proc) { + if (testing != null && testing.shouldKillBeforeStoreUpdateInRollback()) { + kill("TESTING: Kill BEFORE store update in rollback: " + proc); + } if (proc.removeStackIndex()) { if (!proc.isSuccess()) { proc.setState(ProcedureState.ROLLEDBACK); @@ -1577,15 +1702,6 @@ private LockState executeRollback(Procedure proc) { LOG.error(HBaseMarkers.FATAL, "CODE-BUG: Uncaught runtime exception for " + proc, e); } - // allows to kill the executor before something is stored to the wal. - // useful to test the procedure recovery. - if (testing != null && testing.shouldKillBeforeStoreUpdate()) { - String msg = "TESTING: Kill before store update"; - LOG.debug(msg); - stop(); - throw new RuntimeException(msg); - } - cleanupAfterRollbackOneStep(proc); return LockState.LOCK_ACQUIRED; @@ -1714,8 +1830,20 @@ private void execProcedure(RootProcedureState procStack, if (procedure.needPersistence()) { // Add the procedure to the stack // See HBASE-28210 on why we need synchronized here + boolean needUpdateStoreOutsideLock = false; synchronized (procStack) { - procStack.addRollbackStep(procedure); + if (procStack.addRollbackStep(procedure)) { + updateStoreOnExec(procStack, procedure, subprocs); + } else { + needUpdateStoreOutsideLock = true; + } + } + // this is an optimization if we do not need to maintain rollback step, as all subprocedures + // of the same root procedure share the same root procedure state, if we can only update + // store under the above lock, the sub procedures of the same root procedure can only be + // persistent sequentially, which will have a bad performance. See HBASE-28212 for more + // details. 
+ if (needUpdateStoreOutsideLock) { updateStoreOnExec(procStack, procedure, subprocs); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java index 4a225161dbf9..04ae16ddc3f9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java @@ -46,6 +46,7 @@ */ @InterfaceAudience.Private public final class ProcedureUtil { + private ProcedureUtil() { } @@ -188,6 +189,7 @@ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure pro builder.addStackId(stackIds[i]); } } + builder.setExecuted(proc.wasExecuted()); if (proc.hasException()) { RemoteProcedureException exception = proc.getException(); @@ -253,6 +255,9 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) if (proto.getStackIdCount() > 0) { proc.setStackIndexes(proto.getStackIdList()); } + if (proto.getExecuted()) { + proc.setExecuted(); + } if (proto.hasException()) { assert proc.getState() == ProcedureProtos.ProcedureState.FAILED diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 9990bdeb4306..c9f5bad2a131 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.procedure2; import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -32,8 +33,13 @@ * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". A "Root * Procedure" is a Procedure without parent, each subprocedure will be added to the "Root Procedure" * stack (or rollback-stack). RootProcedureState is used and managed only by the ProcedureExecutor. - * Long rootProcId = getRootProcedureId(proc); rollbackStack.get(rootProcId).acquire(proc) - * rollbackStack.get(rootProcId).release(proc) ... + * + *
+ * <pre>
+ *   Long rootProcId = getRootProcedureId(proc);
+ *   rollbackStack.get(rootProcId).acquire(proc)
+ *   rollbackStack.get(rootProcId).release(proc)
+ *   ...
+ * </pre>
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -50,8 +56,15 @@ private enum State { private ArrayList> subprocStack = null; private State state = State.RUNNING; private int running = 0; - - public synchronized boolean isFailed() { + // for some procedures such as SCP and TRSP, there is no way to rollback, so we do not need to + // maintain the rollback steps + // TODO: the rollback logic is a bit complicated, so here we will only test whether the root + // procedure supports rollback at the very beginning, actually, lots of procedure can only + // rollback at the pre check step, after that there is no rollback too, we should try to support + // this too. + private boolean rollbackSupported; + + protected synchronized boolean isFailed() { switch (state) { case ROLLINGBACK: case FAILED: @@ -62,7 +75,7 @@ public synchronized boolean isFailed() { return false; } - public synchronized boolean isRollingback() { + protected synchronized boolean isRollingback() { return state == State.ROLLINGBACK; } @@ -85,6 +98,14 @@ protected synchronized void unsetRollback() { state = State.FAILED; } + protected synchronized void setRollbackSupported(boolean rollbackSupported) { + this.rollbackSupported = rollbackSupported; + } + + protected synchronized boolean isRollbackSupported() { + return rollbackSupported; + } + protected synchronized long[] getSubprocedureIds() { if (subprocs == null) { return null; @@ -92,13 +113,17 @@ protected synchronized long[] getSubprocedureIds() { return subprocs.stream().mapToLong(Procedure::getProcId).toArray(); } + protected synchronized Collection> getSubprocs() { + return subprocs; + } + protected synchronized List> getSubproceduresStack() { return subprocStack; } protected synchronized RemoteProcedureException getException() { - if (subprocStack != null) { - for (Procedure proc : subprocStack) { + if (subprocs != null) { + for (Procedure proc : subprocs) { if (proc.hasException()) { return proc.getException(); } @@ -134,18 +159,27 @@ protected synchronized void abort() { /** * Called by the ProcedureExecutor after the procedure step is completed, to add the step to the - * rollback list (or procedure stack) + * rollback list (or procedure stack). + *
+ * <p/>
+ * Return whether we successfully added the rollback step. If the root procedure has already + * crossed the PONR, we do not need to maintain the rollback step, */ - protected synchronized void addRollbackStep(Procedure proc) { + protected synchronized boolean addRollbackStep(Procedure proc) { if (proc.isFailed()) { state = State.FAILED; } + if (!rollbackSupported) { + // just record executed, skip adding rollback step + proc.setExecuted(); + return false; + } if (subprocStack == null) { subprocStack = new ArrayList<>(); } proc.addStackIndex(subprocStack.size()); LOG.trace("Add procedure {} as the {}th rollback step", proc, subprocStack.size()); subprocStack.add(proc); + return true; } protected synchronized void addSubProcedure(Procedure proc) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index d7ab269cb557..b90600b47075 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -249,6 +249,11 @@ protected final void failIfAborted() { } } + @Override + protected final boolean isRollbackSupported() { + return isRollbackSupported(getCurrentState()); + } + /** * Used by the default implementation of abort() to know if the current state can be aborted and * rollback can be triggered. diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index 926a46e9c56c..32be56e44dbd 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -194,6 +194,15 @@ public static void setKillBeforeStoreUpdate(ProcedureExecutor procE assertSingleExecutorForKillTests(procExecutor); } + public static void setKillBeforeStoreUpdateInRollback(ProcedureExecutor procExecutor, + boolean value) { + createExecutorTesting(procExecutor); + procExecutor.testing.killBeforeStoreUpdateInRollback = value; + LOG.warn("Set Kill before store update in rollback to: " + + procExecutor.testing.killBeforeStoreUpdateInRollback); + assertSingleExecutorForKillTests(procExecutor); + } + public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor, boolean value) { createExecutorTesting(procExecutor); @@ -201,6 +210,13 @@ public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor assertSingleExecutorForKillTests(procExecutor); } + public static void + setToggleKillBeforeStoreUpdateInRollback(ProcedureExecutor procExecutor, boolean value) { + createExecutorTesting(procExecutor); + procExecutor.testing.toggleKillBeforeStoreUpdateInRollback = value; + assertSingleExecutorForKillTests(procExecutor); + } + public static void toggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor) { createExecutorTesting(procExecutor); procExecutor.testing.killBeforeStoreUpdate = !procExecutor.testing.killBeforeStoreUpdate; @@ -208,6 +224,16 @@ public static void toggleKillBeforeStoreUpdate(ProcedureExecutor pr assertSingleExecutorForKillTests(procExecutor); } + public static void + toggleKillBeforeStoreUpdateInRollback(ProcedureExecutor procExecutor) { + createExecutorTesting(procExecutor); + 
procExecutor.testing.killBeforeStoreUpdateInRollback = + !procExecutor.testing.killBeforeStoreUpdateInRollback; + LOG.warn("Set Kill before store update to in rollback: " + + procExecutor.testing.killBeforeStoreUpdateInRollback); + assertSingleExecutorForKillTests(procExecutor); + } + public static void toggleKillAfterStoreUpdate(ProcedureExecutor procExecutor) { createExecutorTesting(procExecutor); procExecutor.testing.killAfterStoreUpdate = !procExecutor.testing.killAfterStoreUpdate; @@ -217,8 +243,15 @@ public static void toggleKillAfterStoreUpdate(ProcedureExecutor pro public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor procExecutor, boolean value) { - ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value); - ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value); + setKillBeforeStoreUpdate(procExecutor, value); + setToggleKillBeforeStoreUpdate(procExecutor, value); + assertSingleExecutorForKillTests(procExecutor); + } + + public static void setKillAndToggleBeforeStoreUpdateInRollback( + ProcedureExecutor procExecutor, boolean value) { + setKillBeforeStoreUpdateInRollback(procExecutor, value); + setToggleKillBeforeStoreUpdateInRollback(procExecutor, value); assertSingleExecutorForKillTests(procExecutor); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index 706803958fc0..4bc2178422aa 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -129,6 +130,7 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { env.waitOnLatch(); LOG.debug("execute procedure " + this + " step=" + step); ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + ProcedureTestingUtility.toggleKillBeforeStoreUpdateInRollback(procExecutor); step++; Threads.sleepWithoutInterrupt(procSleepInterval); if (isAborted()) { @@ -143,6 +145,7 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { protected void rollback(TestProcEnv env) { LOG.debug("rollback procedure " + this + " step=" + step); ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + ProcedureTestingUtility.toggleKillBeforeStoreUpdateInRollback(procExecutor); step++; } @@ -359,6 +362,11 @@ protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State sta return Flow.HAS_MORE_STATE; } + @Override + protected boolean isRollbackSupported(State state) { + return true; + } + @Override protected void rollbackState(TestProcEnv env, final State state) { switch (state) { @@ -425,8 +433,8 @@ public void testStateMachineMultipleLevel() throws Exception { @Test public void testStateMachineRecovery() throws Exception { - ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); - ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + 
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdateInRollback(procExecutor, true); // Step 1 - kill Procedure proc = new TestStateMachineProcedure(); @@ -463,8 +471,8 @@ public void testStateMachineRecovery() throws Exception { @Test public void testStateMachineRollbackRecovery() throws Exception { - ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); - ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdateInRollback(procExecutor, true); // Step 1 - kill Procedure proc = new TestStateMachineProcedure(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java index 61bc42d75ac4..e1770f5ac579 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java @@ -171,6 +171,7 @@ public void testChildBadRollbackStateCount() { public void testChildOnLastStepWithRollbackDoubleExecution() throws Exception { procExecutor.getEnvironment().triggerChildRollback = true; ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdateInRollback(procExecutor, true); long procId = procExecutor.submitProcedure(new TestSMProcedure()); ProcedureTestingUtility.testRecoveryAndDoubleExecution(procExecutor, procId, true); assertEquals(6, procExecutor.getEnvironment().execCount.get()); @@ -249,6 +250,11 @@ protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { return Flow.HAS_MORE_STATE; } + @Override + protected boolean isRollbackSupported(TestSMProcedureState state) { + return true; + } + @Override protected void rollbackState(TestProcEnv env, TestSMProcedureState state) { LOG.info("ROLLBACK " + state + " " + this); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index b15d8b38d4da..47a5e5616e7d 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -238,6 +239,11 @@ public ArrayList getExecutionInfo() { return executionInfo; } + @Override + protected boolean isRollbackSupported(State state) { + return true; + } + @Override protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State state) throws InterruptedException { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index 9d98b0b4f95f..e01c573b1df8 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -30,6 +30,7 @@ import java.util.Comparator; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -73,7 +74,7 @@ public class TestWALProcedureStore { private WALProcedureStore procStore; - private HBaseCommonTestingUtil htu; + private final HBaseCommonTestingUtil htu = new HBaseCommonTestingUtil(); private FileSystem fs; private Path testDir; private Path logDir; @@ -84,13 +85,13 @@ private void setupConfig(final Configuration conf) { @Before public void setUp() throws IOException { - htu = new HBaseCommonTestingUtil(); testDir = htu.getDataTestDir(); htu.getConfiguration().set(HConstants.HBASE_DIR, testDir.toString()); fs = testDir.getFileSystem(htu.getConfiguration()); htu.getConfiguration().set(HConstants.HBASE_DIR, testDir.toString()); assertTrue(testDir.depth() > 1); + TestSequentialProcedure.seqId.set(0); setupConfig(htu.getConfiguration()); logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); @@ -835,10 +836,11 @@ private void verifyProcIdsOnRestart(final Set procIds) throws Exception { } public static class TestSequentialProcedure extends SequentialProcedure { - private static long seqid = 0; + + private static final AtomicLong seqId = new AtomicLong(0); public TestSequentialProcedure() { - setProcId(++seqid); + setProcId(seqId.incrementAndGet()); } @Override diff --git a/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto index addc96cd34c4..f75e858549a0 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/Procedure.proto @@ -69,6 +69,10 @@ message Procedure { // whether the procedure need to be bypassed optional bool bypass = 17 [default = false]; + + // whether the procedure has been executed + // since we do not always maintain the stack_id now, we need a separated flag + optional bool executed = 18 [default = false]; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java index 3678210ee9c5..61520873240c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.assignment; import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState.REGION_STATE_TRANSITION_OPEN; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -93,7 +94,7 @@ public void testBypass() throws IOException, InterruptedException { TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); List regions = admin.getRegions(this.tableName); for (RegionInfo ri : regions) { - 
admin.unassign(ri.getRegionName(), false); + admin.unassign(ri.getRegionName()); } List pids = new ArrayList<>(regions.size()); for (RegionInfo ri : regions) { @@ -102,11 +103,8 @@ public void testBypass() throws IOException, InterruptedException { pids.add( TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().submitProcedure(p)); } - for (Long pid : pids) { - while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().isStarted(pid)) { - Thread.sleep(100); - } - } + TEST_UTIL.waitFor(30000, () -> pids.stream().allMatch( + pid -> TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().isStarted(pid))); List> ps = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getProcedures(); for (Procedure p : ps) { @@ -120,29 +118,17 @@ public void testBypass() throws IOException, InterruptedException { } // Try and assign WITHOUT override flag. Should fail!. for (RegionInfo ri : regions) { - try { - admin.assign(ri.getRegionName()); - } catch (Throwable dnrioe) { - // Expected - LOG.info("Expected {}", dnrioe); - } - } - while ( - !TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getActiveProcIds() - .isEmpty() - ) { - Thread.sleep(100); + IOException error = assertThrows(IOException.class, () -> admin.assign(ri.getRegionName())); + LOG.info("Expected {}", error); } + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getHBaseCluster().getMaster() + .getMasterProcedureExecutor().getActiveProcIds().isEmpty()); // Now assign with the override flag. for (RegionInfo ri : regions) { TEST_UTIL.getHbck().assigns(Arrays. asList(ri.getEncodedName()), true); } - while ( - !TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getActiveProcIds() - .isEmpty() - ) { - Thread.sleep(100); - } + TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getMaster() + .getMasterProcedureExecutor().getActiveProcIds().isEmpty()); for (RegionInfo ri : regions) { assertTrue(ri.toString(), TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() .getRegionStates().isRegionOnline(ri)); @@ -173,6 +159,8 @@ private void init(MasterProcedureEnv env) { @Override protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionState state) throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + // add a sleep so we will not consume all the CPUs and write a bunch of logs + Thread.sleep(100); switch (state) { case REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE: LOG.info("LATCH1 {}", this.latch.getCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java new file mode 100644 index 000000000000..3d1a2c4caa94 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.assignment; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.not; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BalanceRequest; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility; +import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.master.region.MasterRegion; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; + +/** + * SCP does not support rollback actually, here we just want to simulate that when there is a code + * bug, SCP and its sub procedures will not hang there forever, and it will not mess up the + * procedure store. 
+ */ +@Category({ MasterTests.class, LargeTests.class }) +public class TestRollbackSCP { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRollbackSCP.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static final TableName TABLE_NAME = TableName.valueOf("test"); + + private static final byte[] FAMILY = Bytes.toBytes("family"); + + private static final AtomicBoolean INJECTED = new AtomicBoolean(false); + + private static final class AssignmentManagerForTest extends AssignmentManager { + + public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) { + super(master, masterRegion); + } + + @Override + void persistToMeta(RegionStateNode regionNode) throws IOException { + TransitRegionStateProcedure proc = regionNode.getProcedure(); + if (!regionNode.getRegionInfo().isMetaRegion() && proc.hasParent()) { + Procedure p = + getMaster().getMasterProcedureExecutor().getProcedure(proc.getRootProcId()); + // fail the procedure if it is a sub procedure for SCP + if (p instanceof ServerCrashProcedure) { + if (INJECTED.compareAndSet(false, true)) { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdateInRollback( + getMaster().getMasterProcedureExecutor(), true); + } + throw new RuntimeException("inject code bug"); + } + } + super.persistToMeta(regionNode); + } + } + + public static final class HMasterForTest extends HMaster { + + public HMasterForTest(Configuration conf) throws IOException { + super(conf); + } + + @Override + protected AssignmentManager createAssignmentManager(MasterServices master, + MasterRegion masterRegion) { + return new AssignmentManagerForTest(master, masterRegion); + } + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + UTIL.startMiniCluster(StartTestingClusterOption.builder().numDataNodes(3).numRegionServers(3) + .masterClass(HMasterForTest.class).build()); + UTIL.createMultiRegionTable(TABLE_NAME, FAMILY); + UTIL.waitTableAvailable(TABLE_NAME); + UTIL.getAdmin().balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build()); + UTIL.waitUntilNoRegionsInTransition(); + UTIL.getAdmin().balancerSwitch(false, true); + } + + @AfterClass + public static void tearDownAfterClass() throws IOException { + UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws IOException { + UTIL.ensureSomeNonStoppedRegionServersAvailable(2); + } + + private ServerCrashProcedure getSCPForServer(ServerName serverName) throws IOException { + return UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream() + .filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p) + .filter(p -> p.getServerName().equals(serverName)).findFirst().orElse(null); + } + + private Matcher> subProcOf(Procedure proc) { + return new BaseMatcher>() { + + @Override + public boolean matches(Object item) { + if (!(item instanceof Procedure)) { + return false; + } + Procedure p = (Procedure) item; + return p.hasParent() && p.getRootProcId() == proc.getProcId(); + } + + @Override + public void describeTo(Description description) { + description.appendText("sub procedure of(").appendValue(proc).appendText(")"); + } + }; + } + + @Test + public void testFailAndRollback() throws Exception { + HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(TableName.META_TABLE_NAME); + 
UTIL.getMiniHBaseCluster().killRegionServer(rsWithMeta.getServerName()); + UTIL.waitFor(15000, () -> getSCPForServer(rsWithMeta.getServerName()) != null); + ServerCrashProcedure scp = getSCPForServer(rsWithMeta.getServerName()); + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + // wait for the procedure to stop, as we inject a code bug and also set kill before store update + UTIL.waitFor(30000, () -> !procExec.isRunning()); + // make sure that finally we could successfully rollback the procedure + while (scp.getState() != ProcedureState.FAILED || !procExec.isRunning()) { + MasterProcedureTestingUtility.restartMasterProcedureExecutor(procExec); + ProcedureTestingUtility.waitProcedure(procExec, scp); + } + assertEquals(scp.getState(), ProcedureState.FAILED); + assertThat(scp.getException().getMessage(), containsString("inject code bug")); + // make sure all sub procedures are cleaned up + assertThat(procExec.getProcedures(), everyItem(not(subProcOf(scp)))); + } +} From 4d90b918a3702b4e4ae2f9ee890c14665e821c01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 11:05:25 +0800 Subject: [PATCH 154/514] HBASE-28225 Bump cryptography in /dev-support/git-jira-release-audit (#5544) Bumps [cryptography](https://github.com/pyca/cryptography) from 41.0.4 to 41.0.6. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/41.0.4...41.0.6) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index c243f731e1d6..23a4b916fd4b 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2023.7.22 cffi==1.13.2 chardet==3.0.4 -cryptography==41.0.4 +cryptography==41.0.6 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From 44eb408a6d3dd623f482286dacb61098041b17c4 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 29 Nov 2023 10:40:33 +0000 Subject: [PATCH 155/514] HBASE-28211 BucketCache.blocksByHFile may leak on allocationFailure or if we reach io errors tolerated (#5530) Signed-off-by: Duo Zhang --- .../hadoop/hbase/io/hfile/bucket/BucketCache.java | 12 ++++++------ .../io/hfile/bucket/TestBucketWriterThread.java | 4 ++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index ba33d5e02c48..0d5104572605 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -222,7 +222,7 @@ public class BucketCache implements BlockCache, HeapSize { */ transient final IdReadWriteLock offsetLock; - private final NavigableSet blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> { + final NavigableSet blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> { int nameComparison = 
a.getHfileName().compareTo(b.getHfileName()); if (nameComparison != 0) { return nameComparison; @@ -644,12 +644,14 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre blocksByHFile.remove(cacheKey); if (decrementBlockNumber) { this.blockNumber.decrement(); + if (ioEngine.isPersistent()) { + removeFileFromPrefetch(cacheKey.getHfileName()); + } } if (evictedByEvictionProcess) { cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary()); } if (ioEngine.isPersistent()) { - removeFileFromPrefetch(cacheKey.getHfileName()); setCacheInconsistent(true); } } @@ -1084,6 +1086,7 @@ public void run() { */ protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { BucketEntry previousEntry = backingMap.put(key, bucketEntry); + blocksByHFile.add(key); if (previousEntry != null && previousEntry != bucketEntry) { previousEntry.withWriteLock(offsetLock, () -> { blockEvicted(key, previousEntry, false, false); @@ -1164,10 +1167,6 @@ void doDrain(final List entries, ByteBuffer metaBuff) throws Inte index++; continue; } - BlockCacheKey cacheKey = re.getKey(); - if (ramCache.containsKey(cacheKey)) { - blocksByHFile.add(cacheKey); - } // Reset the position for reuse. // It should be guaranteed that the data in the metaBuff has been transferred to the // ioEngine safely. Otherwise, this reuse is problematic. Fortunately, the data is already @@ -1518,6 +1517,7 @@ private void disableCache() { if (!ioEngine.isPersistent() || persistencePath == null) { // If persistent ioengine and a path, we will serialize out the backingMap. this.backingMap.clear(); + this.blocksByHFile.clear(); this.fullyCachedFiles.clear(); this.regionCachedSizeMap.clear(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index 4b729f334116..429fffa38f6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -122,6 +122,8 @@ public void testTooBigEntry() throws InterruptedException { Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE); this.bc.cacheBlock(this.plainKey, tooBigCacheable); doDrainOfOneEntry(this.bc, this.wt, this.q); + assertTrue(bc.blocksByHFile.isEmpty()); + assertTrue(bc.getBackingMap().isEmpty()); } /** @@ -138,6 +140,8 @@ public void testIOE() throws IOException, InterruptedException { Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); this.q.add(spiedRqe); doDrainOfOneEntry(bc, wt, q); + assertTrue(bc.blocksByHFile.isEmpty()); + assertTrue(bc.getBackingMap().isEmpty()); // Cache disabled when ioes w/o ever healing. assertTrue(!bc.isCacheEnabled()); } From 7f3e40026892575151b2d780a9fd5f5305fe96b9 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 30 Nov 2023 21:09:05 -0800 Subject: [PATCH 156/514] Revert "HBASE-28204 Canary can take lot more time If region starts with delete markers (#5522)" This reverts commit ce9eabe61661599d0b424026841eaf0087d84805. 
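For context: the hunk below restores the pre-HBASE-28204 read probe in CanaryTool#readColumnFamily, where a region with a non-empty start key is probed with a point Get on that key and only a table's first region (empty start key) falls back to a single-row, first-key-only Scan. A rough sketch of the restored shape, reusing the method's own fields (table, region, column, rawScanEnabled); illustrative only, the diff below is authoritative:

    byte[] startKey = region.getStartKey();
    if (startKey.length > 0) {
      Get get = new Get(startKey);            // point probe on the region's start key
      get.setCacheBlocks(false);
      get.setFilter(new FirstKeyOnlyFilter());
      get.addFamily(column.getName());
      table.get(get);
    } else {
      Scan scan = new Scan();                 // first region has no start key to Get
      scan.setRaw(rawScanEnabled);
      scan.setCaching(1);
      scan.setCacheBlocks(false);
      scan.setFilter(new FirstKeyOnlyFilter());
      scan.addFamily(column.getName());
      scan.setMaxResultSize(1L);
      scan.setOneRowLimit();
      try (ResultScanner rs = table.getScanner(scan)) {
        rs.next();                            // touching one row is enough for the latency probe
      }
    }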
--- .../apache/hadoop/hbase/tool/CanaryTool.java | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d0cd199ecdc9..d5676263c820 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -510,38 +510,38 @@ public Void call() { private Void readColumnFamily(Table table, ColumnFamilyDescriptor column) { byte[] startKey = null; - Scan scan = new Scan(); + Get get = null; + Scan scan = null; ResultScanner rs = null; StopWatch stopWatch = new StopWatch(); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. if (startKey.length > 0) { - // There are 4 types of region for any table. - // 1. Start and End key are empty. (Table with Single region) - // 2. Start key is empty. (First region of the table) - // 3. End key is empty. (Last region of the table) - // 4. Region with Start & End key. (All the regions between first & last region of the - // table.) - // - // Since Scan only takes Start and/or End Row and doesn't accept the region ID, - // we set the start row when Regions are of type 3 OR 4 as mentioned above. - // For type 1 and 2, We don't need to set this option. - scan.withStartRow(startKey); + get = new Get(startKey); + get.setCacheBlocks(false); + get.setFilter(new FirstKeyOnlyFilter()); + get.addFamily(column.getName()); + } else { + scan = new Scan(); + LOG.debug("rawScan {} for {}", rawScanEnabled, region.getTable()); + scan.setRaw(rawScanEnabled); + scan.setCaching(1); + scan.setCacheBlocks(false); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.addFamily(column.getName()); + scan.setMaxResultSize(1L); + scan.setOneRowLimit(); } - LOG.debug("rawScan {} for {}", rawScanEnabled, region.getTable()); - scan.setRaw(rawScanEnabled); - scan.setCaching(1); - scan.setCacheBlocks(false); - scan.setFilter(new FirstKeyOnlyFilter()); - scan.addFamily(column.getName()); - scan.setMaxResultSize(1L); - scan.setOneRowLimit(); LOG.debug("Reading from {} {} {} {}", region.getTable(), region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(startKey)); try { stopWatch.start(); - rs = table.getScanner(scan); - rs.next(); + if (startKey.length > 0) { + table.get(get); + } else { + rs = table.getScanner(scan); + rs.next(); + } stopWatch.stop(); this.readWriteLatency.add(stopWatch.getTime()); sink.publishReadTiming(serverName, region, column, stopWatch.getTime()); From 02217e1abc0f467d5d5c6e3b5fd2304b2813df49 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 1 Dec 2023 08:23:52 -0500 Subject: [PATCH 157/514] HBASE-28222 Leak in ExportSnapshot during verifySnapshot on S3A (#5554) Revert "HBASE-12819 ExportSnapshot doesn't close FileSystem instances" This reverts commit ee32eebeab38be4e171c6aaf362aff9a584a37f3 Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/snapshot/ExportSnapshot.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index f2a8e00fea5e..c6f655c37306 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; @@ -211,14 +210,12 @@ public void setup(Context context) throws IOException { outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); try { - srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); inputFs = FileSystem.get(inputRoot.toUri(), srcConf); } catch (IOException e) { throw new IOException("Could not get the input FileSystem with root=" + inputRoot, e); } try { - destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { throw new IOException("Could not get the output FileSystem with root=" + outputRoot, e); @@ -241,12 +238,6 @@ public void setup(Context context) throws IOException { } } - @Override - protected void cleanup(Context context) { - IOUtils.closeStream(inputFs); - IOUtils.closeStream(outputFs); - } - @Override public void map(BytesWritable key, NullWritable value, Context context) throws InterruptedException, IOException { @@ -990,10 +981,8 @@ public int doWork() throws IOException { } Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); - srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); - destConf.setBoolean("fs." 
+ outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; @@ -1149,9 +1138,6 @@ public int doWork() throws IOException { } outputFs.delete(outputSnapshotDir, true); return 1; - } finally { - IOUtils.closeStream(inputFs); - IOUtils.closeStream(outputFs); } } From 7dd4d0c532a0d4e60037c193b2ce22f0b03ca01e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 1 Dec 2023 23:31:33 +0800 Subject: [PATCH 158/514] HBASE-28212 Addendum fix TestShell (#5555) We added a new field in Procedure so the json output is also changed thus we need to change the assertion --- .../src/test/ruby/shell/list_locks_test.rb | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb b/hbase-shell/src/test/ruby/shell/list_locks_test.rb index 20a910c485dd..89c6940db2a6 100644 --- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb +++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb @@ -81,7 +81,8 @@ def create_shared_lock(proc_id) "\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", " \ - "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}]" \ + "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}], " \ + "\"executed\"=>false" \ "}\n\n", output) end @@ -101,7 +102,8 @@ def create_shared_lock(proc_id) "\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", " \ - "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}]" \ + "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}], " \ + "\"executed\"=>false" \ "}\n\n", output) end @@ -119,7 +121,8 @@ def create_shared_lock(proc_id) "\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"1\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", " \ - "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}]" \ + "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}], " \ + "\"executed\"=>false" \ "}\n\n" \ "TABLE(hbase:namespace)\n" \ "Lock type: SHARED, count: 1\n\n", @@ -143,7 +146,8 @@ def create_shared_lock(proc_id) "\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"2\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", " \ - "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}]" \ + "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}], " \ + "\"executed\"=>false" \ "}\n\n", output) end @@ -168,7 +172,8 @@ def create_shared_lock(proc_id) "\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"3\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", " \ - "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}]" \ + "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", \"description\"=>\"description\"}], " \ + "\"executed\"=>false" \ "}\n\n", output) end @@ -198,14 +203,14 @@ def 
create_shared_lock(proc_id) "\"lastUpdate\"=>\"0\", \"stateMessage\"=>[{" \ "\"lockType\"=>\"EXCLUSIVE\", " \ "\"tableName\"=>{\"namespace\"=>\"bnM0\", \"qualifier\"=>\"dGFibGU0\"" \ - "}, \"description\"=>\"description\"}]}\n" \ + "}, \"description\"=>\"description\"}], \"executed\"=>false}\n" \ "Waiting procedures\n" \ "{\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \ "\"procId\"=>\"2\", \"submittedTime\"=>\"0\", \"state\"=>\"RUNNABLE\", " \ "\"lastUpdate\"=>\"0\", \"stateMessage\"=>[{" \ "\"lockType\"=>\"SHARED\", " \ "\"tableName\"=>{\"namespace\"=>\"bnM0\", \"qualifier\"=>\"dGFibGU0\"}, " \ - "\"description\"=>\"description\"}]}\n" \ + "\"description\"=>\"description\"}], \"executed\"=>false}\n" \ "1 row(s)\n\n", output) end From 8631714705231aa5db5397d92e4ea9d0e5625129 Mon Sep 17 00:00:00 2001 From: hiping-tech <58875741+hiping-tech@users.noreply.github.com> Date: Mon, 4 Dec 2023 15:51:28 +0800 Subject: [PATCH 159/514] HBASE-28226 Add logic to check for RegionStateNode null pointer in FlushRegionProcedure (#5548) Co-authored-by: lvhaiping.lhp Signed-off-by: Duo Zhang --- .../hadoop/hbase/master/procedure/FlushRegionProcedure.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java index 67f0442b618a..88f7e652cbff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java @@ -88,6 +88,11 @@ protected Procedure[] execute(MasterProcedureEnv env) RegionStates regionStates = env.getAssignmentManager().getRegionStates(); RegionStateNode regionNode = regionStates.getRegionStateNode(region); + if (regionNode == null) { + LOG.debug("Region {} is not in region states, it is very likely that it has been cleared by" + + " other procedures such as merge or split, so skip {}. 
See HBASE-28226", region, this); + return null; + } regionNode.lock(); try { if (!regionNode.isInState(State.OPEN) || regionNode.isInTransition()) { From cf798adeccd575169a1e1e723cd6e1496c380c3f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 4 Dec 2023 16:52:57 +0800 Subject: [PATCH 160/514] HBASE-28199 Phase I: Suspend TRSP and SCP when updating meta (#5520) Signed-off-by: Yu Li --- .../hbase/procedure2/ProcedureExecutor.java | 57 ++++-- .../master/assignment/AssignmentManager.java | 150 ++++++++-------- .../assignment/RegionRemoteProcedureBase.java | 46 +++-- .../master/assignment/RegionStateNode.java | 33 +++- .../assignment/RegionStateNodeLock.java | 166 ++++++++++++++++++ .../master/assignment/RegionStateStore.java | 67 +++++-- .../TransitRegionStateProcedure.java | 126 +++++++++---- .../master/procedure/MasterProcedureEnv.java | 8 + .../procedure/ServerCrashProcedure.java | 57 +++++- .../procedure/TruncateRegionProcedure.java | 2 +- ...eplicationQueueFromZkToTableProcedure.java | 78 ++++---- .../replication/ReplicationPeerManager.java | 2 +- .../hbase/procedure2/ProcedureFutureUtil.java | 112 ++++++++++++ .../master/assignment/MockMasterServices.java | 4 +- .../assignment/TestAssignmentManagerUtil.java | 3 +- .../TestOpenRegionProcedureBackoff.java | 7 +- .../assignment/TestRaceBetweenSCPAndTRSP.java | 13 +- .../assignment/TestRegionStateNodeLock.java | 139 +++++++++++++++ .../master/assignment/TestRollbackSCP.java | 8 +- .../procedure/TestProcedurePriority.java | 20 ++- 20 files changed, 879 insertions(+), 219 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNodeLock.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateNodeLock.java diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 3099c64e00f6..5aa11811122b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -32,8 +32,10 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -237,6 +239,12 @@ public interface ProcedureExecutorListener { */ private TimeoutExecutorThread workerMonitorExecutor; + private ExecutorService forceUpdateExecutor; + + // A thread pool for executing some asynchronous tasks for procedures, you can find references to + // getAsyncTaskExecutor to see the usage + private ExecutorService asyncTaskExecutor; + private int corePoolSize; private int maxPoolSize; @@ -247,9 +255,6 @@ public interface ProcedureExecutorListener { */ private final ProcedureScheduler scheduler; - private final Executor forceUpdateExecutor = Executors.newSingleThreadExecutor( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build()); - private final AtomicLong 
lastProcId = new AtomicLong(-1); private final AtomicLong workerId = new AtomicLong(0); private final AtomicInteger activeExecutorCount = new AtomicInteger(0); @@ -317,19 +322,6 @@ public ProcedureExecutor(final Configuration conf, final TEnvironment environmen this.conf = conf; this.checkOwnerSet = conf.getBoolean(CHECK_OWNER_SET_CONF_KEY, DEFAULT_CHECK_OWNER_SET); refreshConfiguration(conf); - store.registerListener(new ProcedureStoreListener() { - - @Override - public void forceUpdate(long[] procIds) { - Arrays.stream(procIds).forEach(procId -> forceUpdateExecutor.execute(() -> { - try { - forceUpdateProcedure(procId); - } catch (IOException e) { - LOG.warn("Failed to force update procedure with pid={}", procId); - } - })); - } - }); } private void load(final boolean abortOnCorruption) throws IOException { @@ -614,6 +606,28 @@ public void init(int numThreads, boolean abortOnCorruption) throws IOException { this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); this.workerMonitorExecutor = new TimeoutExecutorThread<>(this, threadGroup, "WorkerMonitor"); + int size = Math.max(2, Runtime.getRuntime().availableProcessors()); + ThreadPoolExecutor executor = new ThreadPoolExecutor(size, size, 1, TimeUnit.MINUTES, + new LinkedBlockingQueue(), new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat(getClass().getSimpleName() + "-Async-Task-Executor-%d").build()); + executor.allowCoreThreadTimeOut(true); + this.asyncTaskExecutor = executor; + forceUpdateExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build()); + store.registerListener(new ProcedureStoreListener() { + + @Override + public void forceUpdate(long[] procIds) { + Arrays.stream(procIds).forEach(procId -> forceUpdateExecutor.execute(() -> { + try { + forceUpdateProcedure(procId); + } catch (IOException e) { + LOG.warn("Failed to force update procedure with pid={}", procId); + } + })); + } + }); + // Create the workers workerId.set(0); workerThreads = new CopyOnWriteArrayList<>(); @@ -678,6 +692,8 @@ public void stop() { scheduler.stop(); timeoutExecutor.sendStopSignal(); workerMonitorExecutor.sendStopSignal(); + forceUpdateExecutor.shutdown(); + asyncTaskExecutor.shutdown(); } public void join() { @@ -2055,6 +2071,13 @@ public IdLock getProcExecutionLock() { return procExecutionLock; } + /** + * Get a thread pool for executing some asynchronous tasks + */ + public ExecutorService getAsyncTaskExecutor() { + return asyncTaskExecutor; + } + // ========================================================================== // Worker Thread // ========================================================================== diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 804757959d5c..474b95a2a69b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; import org.apache.hadoop.hbase.util.Bytes; 
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; @@ -1989,71 +1991,78 @@ public RegionInfo getRegionInfo(final String encodedRegionName) { // Should only be called in TransitRegionStateProcedure(and related procedures), as the locking // and pre-assumptions are very tricky. // ============================================================================================ - private void transitStateAndUpdate(RegionStateNode regionNode, RegionState.State newState, - RegionState.State... expectedStates) throws IOException { + private CompletableFuture transitStateAndUpdate(RegionStateNode regionNode, + RegionState.State newState, RegionState.State... expectedStates) { RegionState.State state = regionNode.getState(); - regionNode.transitionState(newState, expectedStates); - boolean succ = false; try { - regionStateStore.updateRegionLocation(regionNode); - succ = true; - } finally { - if (!succ) { + regionNode.transitionState(newState, expectedStates); + } catch (UnexpectedStateException e) { + return FutureUtils.failedFuture(e); + } + CompletableFuture future = regionStateStore.updateRegionLocation(regionNode); + FutureUtils.addListener(future, (r, e) -> { + if (e != null) { // revert regionNode.setState(state); } - } + }); + return future; } // should be called within the synchronized block of RegionStateNode - void regionOpening(RegionStateNode regionNode) throws IOException { + CompletableFuture regionOpening(RegionStateNode regionNode) { // As in SCP, for performance reason, there is no TRSP attached with this region, we will not // update the region state, which means that the region could be in any state when we want to // assign it after a RS crash. So here we do not pass the expectedStates parameter. - transitStateAndUpdate(regionNode, State.OPENING); - regionStates.addRegionToServer(regionNode); - // update the operation count metrics - metrics.incrementOperationCounter(); + return transitStateAndUpdate(regionNode, State.OPENING).thenAccept(r -> { + regionStates.addRegionToServer(regionNode); + // update the operation count metrics + metrics.incrementOperationCounter(); + }); } // should be called under the RegionStateNode lock // The parameter 'giveUp' means whether we will try to open the region again, if it is true, then // we will persist the FAILED_OPEN state into hbase:meta. 
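+  // When giveUp is true, the CompletableFuture returned below completes only after the
+  // FAILED_OPEN state has been persisted to hbase:meta, so the calling
+  // TransitRegionStateProcedure can suspend on it through ProcedureFutureUtil instead of
+  // blocking a procedure worker; when giveUp is false no meta update is needed and an
+  // already-completed future is returned.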
- void regionFailedOpen(RegionStateNode regionNode, boolean giveUp) throws IOException { + CompletableFuture regionFailedOpen(RegionStateNode regionNode, boolean giveUp) { RegionState.State state = regionNode.getState(); ServerName regionLocation = regionNode.getRegionLocation(); - if (giveUp) { - regionNode.setState(State.FAILED_OPEN); - regionNode.setRegionLocation(null); - boolean succ = false; - try { - regionStateStore.updateRegionLocation(regionNode); - succ = true; - } finally { - if (!succ) { - // revert - regionNode.setState(state); - regionNode.setRegionLocation(regionLocation); - } + if (!giveUp) { + if (regionLocation != null) { + regionStates.removeRegionFromServer(regionLocation, regionNode); } + return CompletableFuture.completedFuture(null); } - if (regionLocation != null) { - regionStates.removeRegionFromServer(regionLocation, regionNode); - } + regionNode.setState(State.FAILED_OPEN); + regionNode.setRegionLocation(null); + CompletableFuture future = regionStateStore.updateRegionLocation(regionNode); + FutureUtils.addListener(future, (r, e) -> { + if (e == null) { + if (regionLocation != null) { + regionStates.removeRegionFromServer(regionLocation, regionNode); + } + } else { + // revert + regionNode.setState(state); + regionNode.setRegionLocation(regionLocation); + } + }); + return future; } // should be called under the RegionStateNode lock - void regionClosing(RegionStateNode regionNode) throws IOException { - transitStateAndUpdate(regionNode, State.CLOSING, STATES_EXPECTED_ON_CLOSING); - - RegionInfo hri = regionNode.getRegionInfo(); - // Set meta has not initialized early. so people trying to create/edit tables will wait - if (isMetaRegion(hri)) { - setMetaAssigned(hri, false); - } - regionStates.addRegionToServer(regionNode); - // update the operation count metrics - metrics.incrementOperationCounter(); + CompletableFuture regionClosing(RegionStateNode regionNode) { + return transitStateAndUpdate(regionNode, State.CLOSING, STATES_EXPECTED_ON_CLOSING) + .thenAccept(r -> { + RegionInfo hri = regionNode.getRegionInfo(); + // Set meta has not initialized early. so people trying to create/edit tables will wait + if (isMetaRegion(hri)) { + setMetaAssigned(hri, false); + } + regionStates.addRegionToServer(regionNode); + // update the operation count metrics + metrics.incrementOperationCounter(); + }); } // for open and close, they will first be persist to the procedure store in @@ -2062,7 +2071,8 @@ void regionClosing(RegionStateNode regionNode) throws IOException { // RegionRemoteProcedureBase is woken up, we will persist the RegionStateNode to hbase:meta. 
// should be called under the RegionStateNode lock - void regionOpenedWithoutPersistingToMeta(RegionStateNode regionNode) throws IOException { + void regionOpenedWithoutPersistingToMeta(RegionStateNode regionNode) + throws UnexpectedStateException { regionNode.transitionState(State.OPEN, STATES_EXPECTED_ON_OPEN); RegionInfo regionInfo = regionNode.getRegionInfo(); regionStates.addRegionToServer(regionNode); @@ -2070,7 +2080,8 @@ void regionOpenedWithoutPersistingToMeta(RegionStateNode regionNode) throws IOEx } // should be called under the RegionStateNode lock - void regionClosedWithoutPersistingToMeta(RegionStateNode regionNode) throws IOException { + void regionClosedWithoutPersistingToMeta(RegionStateNode regionNode) + throws UnexpectedStateException { ServerName regionLocation = regionNode.getRegionLocation(); regionNode.transitionState(State.CLOSED, STATES_EXPECTED_ON_CLOSED); regionNode.setRegionLocation(null); @@ -2080,40 +2091,41 @@ void regionClosedWithoutPersistingToMeta(RegionStateNode regionNode) throws IOEx } } + // should be called under the RegionStateNode lock + CompletableFuture persistToMeta(RegionStateNode regionNode) { + return regionStateStore.updateRegionLocation(regionNode).thenAccept(r -> { + RegionInfo regionInfo = regionNode.getRegionInfo(); + if (isMetaRegion(regionInfo) && regionNode.getState() == State.OPEN) { + // Usually we'd set a table ENABLED at this stage but hbase:meta is ALWAYs enabled, it + // can't be disabled -- so skip the RPC (besides... enabled is managed by TableStateManager + // which is backed by hbase:meta... Avoid setting ENABLED to avoid having to update state + // on table that contains state. + setMetaAssigned(regionInfo, true); + } + }); + } + // should be called under the RegionStateNode lock // for SCP - public void regionClosedAbnormally(RegionStateNode regionNode) throws IOException { + public CompletableFuture regionClosedAbnormally(RegionStateNode regionNode) { RegionState.State state = regionNode.getState(); ServerName regionLocation = regionNode.getRegionLocation(); - regionNode.transitionState(State.ABNORMALLY_CLOSED); + regionNode.setState(State.ABNORMALLY_CLOSED); regionNode.setRegionLocation(null); - boolean succ = false; - try { - regionStateStore.updateRegionLocation(regionNode); - succ = true; - } finally { - if (!succ) { + CompletableFuture future = regionStateStore.updateRegionLocation(regionNode); + FutureUtils.addListener(future, (r, e) -> { + if (e == null) { + if (regionLocation != null) { + regionNode.setLastHost(regionLocation); + regionStates.removeRegionFromServer(regionLocation, regionNode); + } + } else { // revert regionNode.setState(state); regionNode.setRegionLocation(regionLocation); } - } - if (regionLocation != null) { - regionNode.setLastHost(regionLocation); - regionStates.removeRegionFromServer(regionLocation, regionNode); - } - } - - void persistToMeta(RegionStateNode regionNode) throws IOException { - regionStateStore.updateRegionLocation(regionNode); - RegionInfo regionInfo = regionNode.getRegionInfo(); - if (isMetaRegion(regionInfo) && regionNode.getState() == State.OPEN) { - // Usually we'd set a table ENABLED at this stage but hbase:meta is ALWAYs enabled, it - // can't be disabled -- so skip the RPC (besides... enabled is managed by TableStateManager - // which is backed by hbase:meta... Avoid setting ENABLED to avoid having to update state - // on table that contains state. 
- setMetaAssigned(regionInfo, true); - } + }); + return future; } // ============================================================================================ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 6b6da9e33965..d27e0068b0ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.Optional; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import org.apache.hadoop.hbase.procedure2.ProcedureFutureUtil; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; @@ -73,6 +75,8 @@ public abstract class RegionRemoteProcedureBase extends Procedure future; + protected RegionRemoteProcedureBase() { } @@ -268,11 +272,21 @@ private void unattach(MasterProcedureEnv env) { getParent(env).unattachRemoteProc(this); } + private CompletableFuture getFuture() { + return future; + } + + private void setFuture(CompletableFuture f) { + future = f; + } + @Override protected Procedure[] execute(MasterProcedureEnv env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { RegionStateNode regionNode = getRegionNode(env); - regionNode.lock(); + if (future == null) { + regionNode.lock(this); + } try { switch (state) { case REGION_REMOTE_PROCEDURE_DISPATCH: { @@ -294,16 +308,29 @@ protected Procedure[] execute(MasterProcedureEnv env) throw new ProcedureSuspendedException(); } case REGION_REMOTE_PROCEDURE_REPORT_SUCCEED: - env.getAssignmentManager().persistToMeta(regionNode); - unattach(env); + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + () -> unattach(env)) + ) { + return null; + } + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getAssignmentManager().persistToMeta(regionNode), env, () -> unattach(env)); return null; case REGION_REMOTE_PROCEDURE_DISPATCH_FAIL: // the remote call is failed so we do not need to change the region state, just return. 
unattach(env); return null; case REGION_REMOTE_PROCEDURE_SERVER_CRASH: - env.getAssignmentManager().regionClosedAbnormally(regionNode); - unattach(env); + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + () -> unattach(env)) + ) { + return null; + } + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getAssignmentManager().regionClosedAbnormally(regionNode), env, + () -> unattach(env)); return null; default: throw new IllegalStateException("Unknown state: " + state); @@ -314,12 +341,11 @@ protected Procedure[] execute(MasterProcedureEnv env) } long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); LOG.warn("Failed updating meta, suspend {}secs {}; {};", backoff / 1000, this, regionNode, e); - setTimeout(Math.toIntExact(backoff)); - setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); - skipPersistence(); - throw new ProcedureSuspendedException(); + throw suspend(Math.toIntExact(backoff), true); } finally { - regionNode.unlock(); + if (future == null) { + regionNode.unlock(this); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java index 91c0222facd1..de00ca92e4c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java @@ -19,8 +19,6 @@ import java.util.Arrays; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -30,6 +28,7 @@ import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -75,7 +74,7 @@ public AssignmentProcedureEvent(final RegionInfo regionInfo) { } } - final Lock lock = new ReentrantLock(); + private final RegionStateNodeLock lock; private final RegionInfo regionInfo; private final ProcedureEvent event; private final ConcurrentMap ritMap; @@ -106,6 +105,7 @@ public AssignmentProcedureEvent(final RegionInfo regionInfo) { this.regionInfo = regionInfo; this.event = new AssignmentProcedureEvent(regionInfo); this.ritMap = ritMap; + this.lock = new RegionStateNodeLock(regionInfo); } /** @@ -319,6 +319,9 @@ public void checkOnline() throws DoNotRetryRegionException { } } + // The below 3 methods are for normal locking operation, where the thread owner is the current + // thread. Typically you just need to use these 3 methods, and use try..finally to release the + // lock in the finally block public void lock() { lock.lock(); } @@ -330,4 +333,28 @@ public boolean tryLock() { public void unlock() { lock.unlock(); } + + // The below 3 methods are for locking region state node when executing procedures, where we may + // do some time consuming work under the lock, for example, updating meta. 
As we may suspend the + // procedure while holding the lock and then release it when the procedure is back, in another + // thread, so we need to use the procedure itself as owner, instead of the current thread. You can + // see the usage in TRSP, SCP, and RegionRemoteProcedureBase for more details. + // Notice that, this does not mean you must use these 3 methods when locking region state node in + // procedure, you are free to use the above 3 methods if you do not want to hold the lock when + // suspending the procedure. + public void lock(Procedure proc) { + lock.lock(proc); + } + + public boolean tryLock(Procedure proc) { + return lock.tryLock(proc); + } + + public void unlock(Procedure proc) { + lock.unlock(proc); + } + + boolean isLocked() { + return lock.isLocked(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNodeLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNodeLock.java new file mode 100644 index 000000000000..a672425c8ed2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNodeLock.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.assignment; + +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A lock implementation which supports unlock by another thread. + *
+ * This is because we need to hold region state node lock while updating region state to meta(for + * keeping consistency), so it is better to yield the procedure to release the procedure worker. But + * after waking up the procedure, we may use another procedure worker to execute the procedure, + * which means we need to unlock by another thread. See HBASE-28196 for more details. + */ +@InterfaceAudience.Private +class RegionStateNodeLock { + + // for better logging message + private final RegionInfo regionInfo; + + private final Lock lock = new ReentrantLock(); + + private final Condition cond = lock.newCondition(); + + private Object owner; + + private int count; + + RegionStateNodeLock(RegionInfo regionInfo) { + this.regionInfo = regionInfo; + } + + private void lock0(Object lockBy) { + lock.lock(); + try { + for (;;) { + if (owner == null) { + owner = lockBy; + count = 1; + return; + } + if (owner == lockBy) { + count++; + return; + } + cond.awaitUninterruptibly(); + } + } finally { + lock.unlock(); + } + } + + private boolean tryLock0(Object lockBy) { + if (!lock.tryLock()) { + return false; + } + try { + if (owner == null) { + owner = lockBy; + count = 1; + return true; + } + if (owner == lockBy) { + count++; + return true; + } + return false; + } finally { + lock.unlock(); + } + } + + private void unlock0(Object unlockBy) { + lock.lock(); + try { + if (owner == null) { + throw new IllegalMonitorStateException("RegionStateNode " + regionInfo + " is not locked"); + } + if (owner != unlockBy) { + throw new IllegalMonitorStateException("RegionStateNode " + regionInfo + " is locked by " + + owner + ", can not be unlocked by " + unlockBy); + } + count--; + if (count == 0) { + owner = null; + cond.signal(); + } + } finally { + lock.unlock(); + } + } + + /** + * Normal lock, will set the current thread as owner. Typically you should use try...finally to + * call unlock in the finally block. + */ + void lock() { + lock0(Thread.currentThread()); + } + + /** + * Normal tryLock, will set the current thread as owner. Typically you should use try...finally to + * call unlock in the finally block. + */ + boolean tryLock() { + return tryLock0(Thread.currentThread()); + } + + /** + * Normal unLock, will use the current thread as owner. Typically you should use try...finally to + * call unlock in the finally block. + */ + void unlock() { + unlock0(Thread.currentThread()); + } + + /** + * Lock by a procedure. You can release the lock in another thread. + */ + void lock(Procedure proc) { + lock0(proc); + } + + /** + * TryLock by a procedure. You can release the lock in another thread. + */ + boolean tryLock(Procedure proc) { + return tryLock0(proc); + } + + /** + * Unlock by a procedure. You do not need to call this method in the same thread with lock. 
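+   * A typical sequence is: a procedure takes the lock on one procedure worker thread, suspends
+   * while an asynchronous hbase:meta update is in flight, and releases the lock from whichever
+   * worker thread resumes it.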
+ */ + void unlock(Procedure proc) { + unlock0(proc); + } + + boolean isLocked() { + lock.lock(); + try { + return owner != null; + } finally { + lock.unlock(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 3561e0cd055b..4d506365f238 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -176,7 +176,7 @@ public static void visitMetaEntry(final RegionStateVisitor visitor, final Result } } - void updateRegionLocation(RegionStateNode regionStateNode) throws IOException { + private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) throws IOException { long time = EnvironmentEdgeManager.currentTime(); long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() @@ -221,11 +221,34 @@ && hasGlobalReplicationScope(regionInfo.getTable()) .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) .build()); LOG.info(info.toString()); - updateRegionLocation(regionInfo, state, put); + return put; + } + + CompletableFuture updateRegionLocation(RegionStateNode regionStateNode) { + Put put; + try { + put = generateUpdateRegionLocationPut(regionStateNode); + } catch (IOException e) { + return FutureUtils.failedFuture(e); + } + RegionInfo regionInfo = regionStateNode.getRegionInfo(); + State state = regionStateNode.getState(); + CompletableFuture future = updateRegionLocation(regionInfo, state, put); if (regionInfo.isMetaRegion() && regionInfo.isFirst()) { // mirror the meta location to zookeeper - mirrorMetaLocation(regionInfo, regionLocation, state); + // we store meta location in master local region which means the above method is + // synchronous(we just wrap the result with a CompletableFuture to make it look like + // asynchronous), so it is OK to just call this method directly here + assert future.isDone(); + if (!future.isCompletedExceptionally()) { + try { + mirrorMetaLocation(regionInfo, regionStateNode.getRegionLocation(), state); + } catch (IOException e) { + return FutureUtils.failedFuture(e); + } + } } + return future; } private void mirrorMetaLocation(RegionInfo regionInfo, ServerName serverName, State state) @@ -249,25 +272,31 @@ private void removeMirrorMetaLocation(int oldReplicaCount, int newReplicaCount) } } - private void updateRegionLocation(RegionInfo regionInfo, State state, Put put) - throws IOException { - try { - if (regionInfo.isMetaRegion()) { + private CompletableFuture updateRegionLocation(RegionInfo regionInfo, State state, + Put put) { + CompletableFuture future; + if (regionInfo.isMetaRegion()) { + try { masterRegion.update(r -> r.put(put)); - } else { - try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { - table.put(put); - } + future = CompletableFuture.completedFuture(null); + } catch (Exception e) { + future = FutureUtils.failedFuture(e); } - } catch (IOException e) { - // TODO: Revist!!!! Means that if a server is loaded, then we will abort our host! - // In tests we abort the Master! 
- String msg = String.format("FAILED persisting region=%s state=%s", - regionInfo.getShortNameToLog(), state); - LOG.error(msg, e); - master.abort(msg, e); - throw e; + } else { + AsyncTable table = master.getAsyncConnection().getTable(TableName.META_TABLE_NAME); + future = table.put(put); } + FutureUtils.addListener(future, (r, e) -> { + if (e != null) { + // TODO: Revist!!!! Means that if a server is loaded, then we will abort our host! + // In tests we abort the Master! + String msg = String.format("FAILED persisting region=%s state=%s", + regionInfo.getShortNameToLog(), state); + LOG.error(msg, e); + master.abort(msg, e); + } + }); + return future; } private long getOpenSeqNumForParentRegion(RegionInfo region) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index 81397915647d..911c0f3111e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -24,6 +24,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.ServerName; @@ -38,11 +39,13 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureFutureUtil; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -137,6 +140,8 @@ public class TransitRegionStateProcedure private long forceRetainmentTotalWait; + private CompletableFuture future; + public TransitRegionStateProcedure() { } @@ -268,21 +273,54 @@ private void queueAssign(MasterProcedureEnv env, RegionStateNode regionNode) } } - private void openRegion(MasterProcedureEnv env, RegionStateNode regionNode) throws IOException { + private CompletableFuture getFuture() { + return future; + } + + private void setFuture(CompletableFuture f) { + future = f; + } + + private void openRegionAfterUpdatingMeta(ServerName loc) { + addChildProcedure(new OpenRegionProcedure(this, getRegion(), loc)); + setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_OPENED); + } + + private void openRegion(MasterProcedureEnv env, RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException { ServerName loc = regionNode.getRegionLocation(); + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + () -> openRegionAfterUpdatingMeta(loc)) + ) { + return; + } if (loc == null || BOGUS_SERVER_NAME.equals(loc)) { LOG.warn("No location specified for {}, jump back to state {} to get one", getRegion(), RegionStateTransitionState.REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE); 
setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE); throw new HBaseIOException("Failed to open region, the location is null or bogus."); } - env.getAssignmentManager().regionOpening(regionNode); - addChildProcedure(new OpenRegionProcedure(this, getRegion(), loc)); - setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_OPENED); + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getAssignmentManager().regionOpening(regionNode), env, + () -> openRegionAfterUpdatingMeta(loc)); + } + + private void regionFailedOpenAfterUpdatingMeta(MasterProcedureEnv env, + RegionStateNode regionNode) { + setFailure(getClass().getSimpleName(), new RetriesExhaustedException( + "Max attempts " + env.getAssignmentManager().getAssignMaxAttempts() + " exceeded")); + regionNode.unsetProcedure(this); } private Flow confirmOpened(MasterProcedureEnv env, RegionStateNode regionNode) - throws IOException { + throws IOException, ProcedureSuspendedException { + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + () -> regionFailedOpenAfterUpdatingMeta(env, regionNode)) + ) { + return Flow.NO_MORE_STATE; + } if (regionNode.isInState(State.OPEN)) { retryCounter = null; if (lastState == RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_OPENED) { @@ -306,14 +344,16 @@ private Flow confirmOpened(MasterProcedureEnv env, RegionStateNode regionNode) LOG.info("Retry={} of max={}; {}; {}", retries, maxAttempts, this, regionNode.toShortString()); if (retries >= maxAttempts) { - env.getAssignmentManager().regionFailedOpen(regionNode, true); - setFailure(getClass().getSimpleName(), new RetriesExhaustedException( - "Max attempts " + env.getAssignmentManager().getAssignMaxAttempts() + " exceeded")); - regionNode.unsetProcedure(this); + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getAssignmentManager().regionFailedOpen(regionNode, true), env, + () -> regionFailedOpenAfterUpdatingMeta(env, regionNode)); return Flow.NO_MORE_STATE; } - env.getAssignmentManager().regionFailedOpen(regionNode, false); + // if not giving up, we will not update meta, so the returned CompletableFuture should be a fake + // one, which should have been completed already + CompletableFuture future = env.getAssignmentManager().regionFailedOpen(regionNode, false); + assert future.isDone(); // we failed to assign the region, force a new plan forceNewPlan = true; regionNode.setRegionLocation(null); @@ -329,17 +369,29 @@ private Flow confirmOpened(MasterProcedureEnv env, RegionStateNode regionNode) } } - private void closeRegion(MasterProcedureEnv env, RegionStateNode regionNode) throws IOException { + private void closeRegionAfterUpdatingMeta(RegionStateNode regionNode) { + CloseRegionProcedure closeProc = isSplit + ? 
new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), assignCandidate, + true) + : new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), assignCandidate, + evictCache); + addChildProcedure(closeProc); + setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_CLOSED); + } + + private void closeRegion(MasterProcedureEnv env, RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException { + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + () -> closeRegionAfterUpdatingMeta(regionNode)) + ) { + return; + } if (regionNode.isInState(State.OPEN, State.CLOSING, State.MERGING, State.SPLITTING)) { // this is the normal case - env.getAssignmentManager().regionClosing(regionNode); - CloseRegionProcedure closeProc = isSplit - ? new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), - assignCandidate, true) - : new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), - assignCandidate, evictCache); - addChildProcedure(closeProc); - setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_CLOSED); + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getAssignmentManager().regionClosing(regionNode), env, + () -> closeRegionAfterUpdatingMeta(regionNode)); } else { forceNewPlan = true; regionNode.setRegionLocation(null); @@ -393,11 +445,18 @@ protected Procedure[] execute(MasterProcedureEnv env) throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(getRegion()); - regionNode.lock(); + if (future == null) { + // if future is not null, we will not release the regionNode lock, so do not need to lock it + // again + regionNode.lock(this); + } try { return super.execute(env); } finally { - regionNode.unlock(); + if (future == null) { + // release the lock if there is no pending updating meta operation + regionNode.unlock(this); + } } } @@ -452,10 +511,7 @@ protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionSta "Failed transition, suspend {}secs {}; {}; waiting on rectified condition fixed " + "by other Procedure or operator intervention", backoff / 1000, this, regionNode.toShortString(), e); - setTimeout(Math.toIntExact(backoff)); - setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); - skipPersistence(); - throw new ProcedureSuspendedException(); + throw suspend(Math.toIntExact(backoff), true); } } @@ -492,15 +548,25 @@ public void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode, } // Should be called with RegionStateNode locked - public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode, - ServerName serverName, boolean forceNewPlan) throws IOException { + public CompletableFuture serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode, + ServerName serverName, boolean forceNewPlan) { this.forceNewPlan = forceNewPlan; if (remoteProc != null) { // this means we are waiting for the sub procedure, so wake it up - remoteProc.serverCrashed(env, regionNode, serverName); + try { + remoteProc.serverCrashed(env, regionNode, serverName); + } catch (Exception e) { + return FutureUtils.failedFuture(e); + } + return CompletableFuture.completedFuture(null); } else { - // we are in RUNNING state, just update the region state, and we will process it later. 
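+        // The future returned here completes once the ABNORMALLY_CLOSED state has been persisted
+        // to hbase:meta; the caller (ServerCrashProcedure, via
+        // ProcedureFutureUtil.suspendIfNecessary) can then release its worker thread while the
+        // update is in flight and resume when it completes.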
- env.getAssignmentManager().regionClosedAbnormally(regionNode); + if (regionNode.isInState(State.ABNORMALLY_CLOSED)) { + // should be a retry, where we have already changed the region state to abnormally closed + return CompletableFuture.completedFuture(null); + } else { + // we are in RUNNING state, just update the region state, and we will process it later. + return env.getAssignmentManager().regionClosedAbnormally(regionNode); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java index f7f4146bd0d5..218d3096d8df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; +import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -86,6 +87,13 @@ public MasterProcedureEnv(final MasterServices master, this.remoteDispatcher = remoteDispatcher; } + /** + * Get a thread pool for executing some asynchronous tasks + */ + public ExecutorService getAsyncTaskExecutor() { + return master.getMasterProcedureExecutor().getAsyncTaskExecutor(); + } + public User getRequestUser() { return RpcServer.getRequestUser().orElse(Superusers.getSystemUser()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 97976756d828..901cc38a7be7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureFutureUtil; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; @@ -107,6 +109,10 @@ public class ServerCrashProcedure extends // progress will not update the state because the actual state is overwritten by its next state private ServerCrashState currentRunningState = getInitialState(); + private CompletableFuture updateMetaFuture; + + private int processedRegions = 0; + /** * Call this constructor queuing up a Procedure. * @param serverName Name of the crashed server. @@ -532,6 +538,14 @@ protected boolean isMatchingRegionLocation(RegionStateNode rsn) { return this.serverName.equals(rsn.getRegionLocation()); } + private CompletableFuture getUpdateMetaFuture() { + return updateMetaFuture; + } + + private void setUpdateMetaFuture(CompletableFuture f) { + updateMetaFuture = f; + } + /** * Assign the regions on the crashed RS to other Rses. *
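The hunks around this point switch RegionStateNode locking from plain thread ownership to procedure ownership (regionNode.lock(this) / regionNode.unlock(this)), so a procedure that suspends while an asynchronous meta update is still in flight can keep the lock and release it later from a different worker thread. A minimal sketch of that pattern as used in this series; the updateMetaFuture field matches ServerCrashProcedure, while the wrapper method and its body are illustrative only:

    private CompletableFuture<Void> updateMetaFuture;

    private void workOnRegion(RegionStateNode regionNode) {
      if (updateMetaFuture == null) {
        // first entry for this region: take the lock with this procedure as the owner
        regionNode.lock(this);
      }
      try {
        // ... update in-memory state, possibly start an asynchronous hbase:meta update ...
      } finally {
        if (updateMetaFuture == null) {
          // only release when no async update is pending; otherwise the lock is kept across the
          // suspend and released on a later execution of this procedure
          regionNode.unlock(this);
        }
      }
    }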
@@ -542,14 +556,30 @@ protected boolean isMatchingRegionLocation(RegionStateNode rsn) { * We will also check whether the table for a region is enabled, if not, we will skip assigning * it. */ - private void assignRegions(MasterProcedureEnv env, List regions) throws IOException { + private void assignRegions(MasterProcedureEnv env, List regions) + throws IOException, ProcedureSuspendedException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT); - for (RegionInfo region : regions) { + // Since we may suspend in the middle of this loop, so here we use processedRegions to record + // the progress, so next time we can locate the correct region + // We do not need to persist the processedRegions when serializing the procedure, as when master + // restarts, the sub procedure list will be cleared when rescheduling this SCP again, so we need + // to start from beginning. + for (int n = regions.size(); processedRegions < n; processedRegions++) { + RegionInfo region = regions.get(processedRegions); RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region); - regionNode.lock(); + if (updateMetaFuture == null) { + regionNode.lock(this); + } try { + if ( + ProcedureFutureUtil.checkFuture(this, this::getUpdateMetaFuture, + this::setUpdateMetaFuture, () -> { + }) + ) { + continue; + } // This is possible, as when a server is dead, TRSP will fail to schedule a RemoteProcedure // and then try to assign the region to a new RS. And before it has updated the region // location to the new RS, we may have already called the am.getRegionsOnServer so we will @@ -572,8 +602,10 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr } if (regionNode.getProcedure() != null) { LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode); - regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), - !retainAssignment); + ProcedureFutureUtil.suspendIfNecessary(this, this::setUpdateMetaFuture, regionNode + .getProcedure().serverCrashed(env, regionNode, getServerName(), !retainAssignment), env, + () -> { + }); continue; } if ( @@ -583,7 +615,9 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr // We need to change the state here otherwise the TRSP scheduled by DTP will try to // close the region from a dead server and will never succeed. Please see HBASE-23636 // for more details. - env.getAssignmentManager().regionClosedAbnormally(regionNode); + ProcedureFutureUtil.suspendIfNecessary(this, this::setUpdateMetaFuture, + env.getAssignmentManager().regionClosedAbnormally(regionNode), env, () -> { + }); LOG.info("{} found table disabling for region {}, set it state to ABNORMALLY_CLOSED.", this, regionNode); continue; @@ -599,11 +633,20 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, region, !retainAssignment, null); regionNode.setProcedure(proc); + // It is OK to still use addChildProcedure even if we suspend in the middle of this loop, as + // the subProcList will only be cleared when we successfully returned from the + // executeFromState method. 
This means we will submit all the TRSPs after we successfully + // finished this loop addChildProcedure(proc); } finally { - regionNode.unlock(); + if (updateMetaFuture == null) { + regionNode.unlock(this); + } } } + // we will call this method two times if the region server carries meta, so we need to reset it + // to 0 after successfully finished the above loop + processedRegions = 0; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index 5e907c1681ac..9730391baf22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -110,8 +110,8 @@ assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() private void deleteRegionFromFileSystem(final MasterProcedureEnv env) throws IOException { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); + regionNode.lock(); try { - regionNode.lock(); final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); HRegionFileSystem.deleteRegionFromFileSystem(env.getMasterConfiguration(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java index c88d613e5260..cff1b3879360 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/MigrateReplicationQueueFromZkToTableProcedure.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.procedure.GlobalProcedureInterface; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface; +import org.apache.hadoop.hbase.procedure2.ProcedureFutureUtil; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; @@ -43,8 +44,6 @@ import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration; -import org.apache.hadoop.hbase.util.FutureUtils; -import org.apache.hadoop.hbase.util.IdLock; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -73,7 +72,7 @@ public class MigrateReplicationQueueFromZkToTableProcedure private List disabledPeerIds; - private CompletableFuture future; + private CompletableFuture future; private ExecutorService executor; @@ -84,6 +83,14 @@ public String getGlobalId() { return getClass().getSimpleName(); } + private CompletableFuture getFuture() { + return future; + } + + private void setFuture(CompletableFuture f) { + future = f; + } + private ProcedureSuspendedException suspend(Configuration conf, LongConsumer backoffConsumer) throws ProcedureSuspendedException { if (retryCounter == null) { 
@@ -153,6 +160,12 @@ private void waitUntilNoPeerProcedure(MasterProcedureEnv env) throws ProcedureSu LOG.info("No pending peer procedures found, continue..."); } + private void finishMigartion() { + shutdownExecutorService(); + setNextState(MIGRATE_REPLICATION_QUEUE_FROM_ZK_TO_TABLE_WAIT_UPGRADING); + resetRetry(); + } + @Override protected Flow executeFromState(MasterProcedureEnv env, MigrateReplicationQueueFromZkToTableState state) @@ -195,52 +208,23 @@ protected Flow executeFromState(MasterProcedureEnv env, setNextState(MIGRATE_REPLICATION_QUEUE_FROM_ZK_TO_TABLE_MIGRATE); return Flow.HAS_MORE_STATE; case MIGRATE_REPLICATION_QUEUE_FROM_ZK_TO_TABLE_MIGRATE: - if (future != null) { - // should have finished when we arrive here - assert future.isDone(); - try { - future.get(); - } catch (Exception e) { - future = null; - throw suspend(env.getMasterConfiguration(), - backoff -> LOG.warn("failed to migrate queue data, sleep {} secs and retry later", - backoff / 1000, e)); + try { + if ( + ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, + this::finishMigartion) + ) { + return Flow.HAS_MORE_STATE; } - shutdownExecutorService(); - setNextState(MIGRATE_REPLICATION_QUEUE_FROM_ZK_TO_TABLE_WAIT_UPGRADING); - resetRetry(); - return Flow.HAS_MORE_STATE; + ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, + env.getReplicationPeerManager() + .migrateQueuesFromZk(env.getMasterServices().getZooKeeper(), getExecutorService()), + env, this::finishMigartion); + } catch (IOException e) { + throw suspend(env.getMasterConfiguration(), + backoff -> LOG.warn("failed to migrate queue data, sleep {} secs and retry later", + backoff / 1000, e)); } - future = env.getReplicationPeerManager() - .migrateQueuesFromZk(env.getMasterServices().getZooKeeper(), getExecutorService()); - FutureUtils.addListener(future, (r, e) -> { - // should acquire procedure execution lock to make sure that the procedure executor has - // finished putting this procedure to the WAITING_TIMEOUT state, otherwise there could be - // race and cause unexpected result - IdLock procLock = - env.getMasterServices().getMasterProcedureExecutor().getProcExecutionLock(); - IdLock.Entry lockEntry; - try { - lockEntry = procLock.getLockEntry(getProcId()); - } catch (IOException ioe) { - LOG.error("Error while acquiring execution lock for procedure {}" - + " when trying to wake it up, aborting...", this, ioe); - env.getMasterServices().abort("Can not acquire procedure execution lock", e); - return; - } - try { - setTimeoutFailure(env); - } finally { - procLock.releaseLockEntry(lockEntry); - } - }); - // here we set timeout to -1 so the ProcedureExecutor will not schedule a Timer for us - setTimeout(-1); - setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); - // skip persistence is a must now since when restarting, if the procedure is in - // WAITING_TIMEOUT state and has -1 as timeout, it will block there forever... 
- skipPersistence(); - throw new ProcedureSuspendedException(); + return Flow.HAS_MORE_STATE; case MIGRATE_REPLICATION_QUEUE_FROM_ZK_TO_TABLE_WAIT_UPGRADING: long rsWithLowerVersion = env.getMasterServices().getServerManager().getOnlineServers().values().stream() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 988c519f781d..322b5bb7fc78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -797,7 +797,7 @@ private CompletableFuture runAsync(ExceptionalRunnable task, ExecutorService /** * Submit the migration tasks to the given {@code executor}. */ - CompletableFuture migrateQueuesFromZk(ZKWatcher zookeeper, ExecutorService executor) { + CompletableFuture migrateQueuesFromZk(ZKWatcher zookeeper, ExecutorService executor) { // the replication queue table creation is asynchronous and will be triggered by addPeer, so // here we need to manually initialize it since we will not call addPeer. try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java new file mode 100644 index 000000000000..8ca4cba245da --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import java.util.function.Supplier; +import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.IdLock; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A helper class for switching procedure out(yielding) while it is doing some time consuming + * operation, such as updating meta, where we can get a {@link CompletableFuture} about the + * operation. 
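The call sites earlier in this series (TransitRegionStateProcedure.closeRegion, ServerCrashProcedure.assignRegions and the replication queue migration procedure above) all use this helper in the same two-step way. A condensed sketch of that pattern; getFuture/setFuture are the per-procedure accessors each caller already defines, while startAsyncOperation and doSomethingAfterDone are illustrative stand-ins for the caller's asynchronous call and completion callback:

    // inside executeFromState(...), in a state that needs an asynchronous meta/table update
    if (ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture,
      this::doSomethingAfterDone)) {
      // a previously started future has completed: its result has been checked and the
      // completion callback has run, so just move on to the next state
      return Flow.HAS_MORE_STATE;
    }
    // start the async operation; if it does not complete immediately, this stashes the future
    // via setFuture, throws ProcedureSuspendedException, and re-adds the procedure to the
    // scheduler once the future completes
    ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture,
      startAsyncOperation(env), env, this::doSomethingAfterDone);
    return Flow.HAS_MORE_STATE;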
+ */ +@InterfaceAudience.Private +public final class ProcedureFutureUtil { + + private static final Logger LOG = LoggerFactory.getLogger(ProcedureFutureUtil.class); + + private ProcedureFutureUtil() { + } + + public static boolean checkFuture(Procedure proc, Supplier> getFuture, + Consumer> setFuture, Runnable actionAfterDone) throws IOException { + CompletableFuture future = getFuture.get(); + if (future == null) { + return false; + } + // reset future + setFuture.accept(null); + FutureUtils.get(future); + actionAfterDone.run(); + return true; + } + + public static void suspendIfNecessary(Procedure proc, + Consumer> setFuture, CompletableFuture future, + MasterProcedureEnv env, Runnable actionAfterDone) + throws IOException, ProcedureSuspendedException { + MutableBoolean completed = new MutableBoolean(false); + Thread currentThread = Thread.currentThread(); + FutureUtils.addListener(future, (r, e) -> { + if (Thread.currentThread() == currentThread) { + LOG.debug("The future has completed while adding callback, give up suspending procedure {}", + proc); + // this means the future has already been completed, as we call the callback directly while + // calling addListener, so here we just set completed to true without doing anything + completed.setTrue(); + return; + } + LOG.debug("Going to wake up procedure {} because future has completed", proc); + // This callback may be called inside netty's event loop, so we should not block it for a long + // time. The worker executor will hold the execution lock while executing the procedure, and + // we may persist the procedure state inside the lock, which is a time consuming operation. + // And what makes things worse is that, we persist procedure state to master local region, + // where the AsyncFSWAL implementation will use the same netty's event loop for dealing with + // I/O, which could even cause dead lock. 
+ env.getAsyncTaskExecutor().execute(() -> { + // should acquire procedure execution lock to make sure that the procedure executor has + // finished putting this procedure to the WAITING_TIMEOUT state, otherwise there could be + // race and cause unexpected result + IdLock procLock = + env.getMasterServices().getMasterProcedureExecutor().getProcExecutionLock(); + IdLock.Entry lockEntry; + try { + lockEntry = procLock.getLockEntry(proc.getProcId()); + } catch (IOException ioe) { + LOG.error("Error while acquiring execution lock for procedure {}" + + " when trying to wake it up, aborting...", proc, ioe); + env.getMasterServices().abort("Can not acquire procedure execution lock", e); + return; + } + try { + env.getProcedureScheduler().addFront(proc); + } finally { + procLock.releaseLockEntry(lockEntry); + } + }); + }); + if (completed.getValue()) { + FutureUtils.get(future); + actionAfterDone.run(); + } else { + // suspend the procedure + setFuture.accept(future); + proc.skipPersistence(); + throw new ProcedureSuspendedException(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index c601425e5f0a..6bc4c9d14e6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -27,6 +27,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateManager; @@ -305,7 +306,8 @@ public MockRegionStateStore(MasterServices master, MasterRegion masterRegion) { } @Override - public void updateRegionLocation(RegionStateNode regionNode) throws IOException { + public CompletableFuture updateRegionLocation(RegionStateNode regionNode) { + return CompletableFuture.completedFuture(null); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java index eb6f069474a9..69381b37e38c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; -import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -80,7 +79,7 @@ public void tearDownAfterTest() throws IOException { for (RegionInfo region : UTIL.getAdmin().getRegions(TABLE_NAME)) { RegionStateNode regionNode = AM.getRegionStates().getRegionStateNode(region); // confirm that we have released the lock - assertFalse(((ReentrantLock) regionNode.lock).isLocked()); + assertFalse(regionNode.isLocked()); TransitRegionStateProcedure proc = regionNode.getProcedure(); if (proc != null) { regionNode.unsetProcedure(proc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java index 2757e0dd9f20..2f88f6087dd4 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FutureUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -61,11 +62,11 @@ public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion } @Override - void persistToMeta(RegionStateNode regionNode) throws IOException { + CompletableFuture persistToMeta(RegionStateNode regionNode) { if (FAIL) { - throw new IOException("Inject Error!"); + return FutureUtils.failedFuture(new IOException("Inject Error!")); } - super.persistToMeta(regionNode); + return super.persistToMeta(regionNode); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java index 1d13912fb72c..05179c5eadb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; @@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -76,8 +78,14 @@ public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion } @Override - void regionOpening(RegionStateNode regionNode) throws IOException { - super.regionOpening(regionNode); + CompletableFuture regionOpening(RegionStateNode regionNode) { + CompletableFuture future = super.regionOpening(regionNode); + try { + // wait until the operation done, then trigger later processing, to make the test more + // stable + FutureUtils.get(future); + } catch (IOException e) { + } if (regionNode.getRegionInfo().getTable().equals(NAME) && ARRIVE_REGION_OPENING != null) { ARRIVE_REGION_OPENING.countDown(); ARRIVE_REGION_OPENING = null; @@ -86,6 +94,7 @@ void regionOpening(RegionStateNode regionNode) throws IOException { } catch (InterruptedException e) { } } + return future; } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateNodeLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateNodeLock.java new file mode 100644 index 000000000000..c308b69c98cf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateNodeLock.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.assignment; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.AtomicUtils; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, SmallTests.class }) +public class TestRegionStateNodeLock { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionStateNodeLock.class); + + private final RegionInfo regionInfo = + RegionInfoBuilder.newBuilder(TableName.valueOf("test")).build(); + + private RegionStateNodeLock lock; + + @Before + public void setUp() { + lock = new RegionStateNodeLock(regionInfo); + } + + @Test + public void testLockByThread() { + assertFalse(lock.isLocked()); + assertThrows(IllegalMonitorStateException.class, () -> lock.unlock()); + lock.lock(); + assertTrue(lock.isLocked()); + // reentrant + assertTrue(lock.tryLock()); + lock.unlock(); + assertTrue(lock.isLocked()); + lock.unlock(); + assertFalse(lock.isLocked()); + } + + @Test + public void testLockByProc() { + NoopProcedure proc = new NoopProcedure(); + assertFalse(lock.isLocked()); + assertThrows(IllegalMonitorStateException.class, () -> lock.unlock(proc)); + lock.lock(proc); + assertTrue(lock.isLocked()); + // reentrant + assertTrue(lock.tryLock(proc)); + lock.unlock(proc); + assertTrue(lock.isLocked()); + lock.unlock(proc); + assertFalse(lock.isLocked()); + } + + @Test + public void testLockProcThenThread() { + NoopProcedure proc = new NoopProcedure(); + assertFalse(lock.isLocked()); + lock.lock(proc); + assertFalse(lock.tryLock()); + assertThrows(IllegalMonitorStateException.class, () -> lock.unlock()); + long startNs = System.nanoTime(); + new Thread(() -> { + Threads.sleepWithoutInterrupt(2000); + lock.unlock(proc); + }).start(); + lock.lock(); + long costNs = System.nanoTime() - startNs; + assertThat(TimeUnit.NANOSECONDS.toMillis(costNs), greaterThanOrEqualTo(1800L)); + assertTrue(lock.isLocked()); + lock.unlock(); + assertFalse(lock.isLocked()); + } + + @Test + public void testLockMultiThread() throws InterruptedException 
{ + int nThreads = 10; + AtomicLong concurrency = new AtomicLong(0); + AtomicLong maxConcurrency = new AtomicLong(0); + Thread[] threads = new Thread[nThreads]; + for (int i = 0; i < nThreads; i++) { + threads[i] = new Thread(() -> { + for (int j = 0; j < 1000; j++) { + lock.lock(); + try { + long c = concurrency.incrementAndGet(); + AtomicUtils.updateMax(maxConcurrency, c); + concurrency.decrementAndGet(); + } finally { + lock.unlock(); + } + Threads.sleepWithoutInterrupt(1); + } + }); + } + for (Thread t : threads) { + t.start(); + } + for (Thread t : threads) { + t.join(); + } + assertEquals(0, concurrency.get()); + assertEquals(1, maxConcurrency.get()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java index 3d1a2c4caa94..cd73e09af6db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FutureUtils; import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; @@ -85,7 +87,7 @@ public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion } @Override - void persistToMeta(RegionStateNode regionNode) throws IOException { + CompletableFuture persistToMeta(RegionStateNode regionNode) { TransitRegionStateProcedure proc = regionNode.getProcedure(); if (!regionNode.getRegionInfo().isMetaRegion() && proc.hasParent()) { Procedure p = @@ -96,10 +98,10 @@ void persistToMeta(RegionStateNode regionNode) throws IOException { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdateInRollback( getMaster().getMasterProcedureExecutor(), true); } - throw new RuntimeException("inject code bug"); + return FutureUtils.failedFuture(new RuntimeException("inject code bug")); } } - super.persistToMeta(regionNode); + return super.persistToMeta(regionNode); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java index 3319a761eb4c..d1e7dc147615 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -55,10 +56,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; + /** * Test to ensure that the priority for procedures and stuck checker can partially solve the problem * describe in HBASE-19976, that is, RecoverMetaProcedure can finally be executed within a certain * period of time. + *
+ * As of HBASE-28199, we no longer block a worker when updating meta now, so this test can not test + * adding procedure worker now, but it could still be used to make sure that we could make progress + * when meta is gone and we have a lot of pending TRSPs. */ @Category({ MasterTests.class, LargeTests.class }) public class TestProcedurePriority { @@ -129,6 +136,7 @@ public static void setUp() throws Exception { } UTIL.getAdmin().balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build()); UTIL.waitUntilNoRegionsInTransition(); + UTIL.getAdmin().balancerSwitch(false, true); } @AfterClass @@ -144,22 +152,26 @@ public void test() throws Exception { HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer()); FAIL = true; UTIL.getMiniHBaseCluster().killRegionServer(rsNoMeta.getServerName()); - // wait until all the worker thread are stuck, which means that the stuck checker will start to - // add new worker thread. ProcedureExecutor executor = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + // wait until we have way more TRSPs than the core pool size, and then make sure we can recover + // normally UTIL.waitFor(60000, new ExplainingPredicate() { @Override public boolean evaluate() throws Exception { - return executor.getWorkerThreadCount() > CORE_POOL_SIZE; + return executor.getProcedures().stream().filter(p -> !p.isFinished()) + .filter(p -> p.getState() != ProcedureState.INITIALIZING) + .filter(p -> p instanceof TransitRegionStateProcedure).count() > 5 * CORE_POOL_SIZE; } @Override public String explainFailure() throws Exception { - return "Stuck checker does not add new worker thread"; + return "Not enough TRSPs scheduled"; } }); + // sleep more time to make sure the TRSPs have been executed + Thread.sleep(10000); UTIL.getMiniHBaseCluster().killRegionServer(rsWithMetaThread.getRegionServer().getServerName()); rsWithMetaThread.join(); FAIL = false; From bc0f7a41b38de453bf3cc188d54d227e0b7f3ba9 Mon Sep 17 00:00:00 2001 From: vinayak hegde Date: Mon, 4 Dec 2023 16:33:34 +0530 Subject: [PATCH 161/514] HBASE-28209: Create a jmx metrics to expose the oldWALs directory size (#5528) Signed-off-by: Wellington Chevreuil --- .../org/apache/hadoop/hbase/HConstants.java | 7 ++ .../src/main/resources/hbase-default.xml | 6 ++ .../hbase/master/MetricsMasterSource.java | 2 + .../hbase/master/MetricsMasterSourceImpl.java | 4 +- .../hbase/master/MetricsMasterWrapper.java | 5 ++ .../apache/hadoop/hbase/master/HMaster.java | 6 ++ .../hadoop/hbase/master/MasterWalManager.java | 12 +++ .../master/MetricsMasterWrapperImpl.java | 8 ++ .../hbase/master/OldWALsDirSizeChore.java | 53 +++++++++++ .../hbase/master/TestMasterMetrics.java | 2 + .../master/TestMasterMetricsWrapper.java | 1 + .../hbase/master/TestOldWALsDirSizeChore.java | 90 +++++++++++++++++++ .../asciidoc/_chapters/hbase-default.adoc | 11 +++ 13 files changed, 206 insertions(+), 1 deletion(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/OldWALsDirSizeChore.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOldWALsDirSizeChore.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 2aa9ecf69ec4..5b53d2b2c0d3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -153,6 +153,13 @@ public enum OperationStatusCode { /** 
Default value for the balancer period */ public static final int DEFAULT_HBASE_BALANCER_PERIOD = 300000; + /** Config for the oldWALs directory size updater period */ + public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = + "hbase.master.oldwals.dir.updater.period"; + + /** Default value for the oldWALs directory size updater period */ + public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 300000; + /** * Config key for enable/disable automatically separate child regions to different region servers * in the procedure of split regions. One child will be kept to the server where parent region is diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 17a9853d2ad3..1bf63b136e04 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -606,6 +606,12 @@ possible configurations would overwhelm and obscure the important. Period at which the region balancer runs in the Master, in milliseconds. + + hbase.master.oldwals.dir.updater.period + 300000 + Period at which the oldWALs directory size calculator/updater will run in the + Master, in milliseconds. + hbase.regions.slop 0.2 diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 4a5b97ae66bf..d606ed630881 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -70,6 +70,7 @@ public interface MetricsMasterSource extends BaseSource { String CLUSTER_REQUESTS_NAME = "clusterRequests"; String CLUSTER_READ_REQUESTS_NAME = "clusterReadRequests"; String CLUSTER_WRITE_REQUESTS_NAME = "clusterWriteRequests"; + String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize"; String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; String MASTER_START_TIME_DESC = "Master Start Time"; String MASTER_FINISHED_INITIALIZATION_TIME_DESC = @@ -91,6 +92,7 @@ public interface MetricsMasterSource extends BaseSource { String OFFLINE_REGION_COUNT_DESC = "Number of Offline Regions"; String SERVER_CRASH_METRIC_PREFIX = "serverCrash"; + String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes"; /** * Increment the number of requests the cluster has seen. 
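Taken together, the changes in this patch wire the new gauge end to end: the chore periodically refreshes a cached size in MasterWalManager, and the metrics source publishes it through MetricsMasterWrapper. A rough sketch of that flow (master stands for a running HMaster; in practice the chore drives the refresh on the configured period rather than by hand):

    // refresh the cached value the same way OldWALsDirSizeChore.chore() does
    MasterWalManager walManager = master.getMasterWalManager();
    walManager.updateOldWALsDirSize();   // fs.getContentSummary(oldWALs).getLength(), may throw IOException

    // this cached value is what MetricsMasterWrapperImpl.getOldWALsDirSize() returns and what
    // MetricsMasterSourceImpl exports as the "oldWALsDirSize" gauge
    long oldWalsBytes = walManager.getOldWALsDirSize();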
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index e0abf77bea44..011e66312aa3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -129,7 +129,9 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); + String.valueOf(masterWrapper.getIsActiveMaster())) + .addGauge(Interns.info(OLD_WAL_DIR_SIZE_NAME, OLD_WAL_DIR_SIZE_DESC), + masterWrapper.getOldWALsDirSize()); } metricsRegistry.snapshot(metricsRecordBuilder, all); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index a900edf115e3..83419e2d5501 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -153,4 +153,9 @@ public interface MetricsMasterWrapper { * @return pair of count for online regions and offline regions */ PairOfSameType getRegionCounts(); + + /** + * Get the size of old WALs directory in bytes. + */ + long getOldWALsDirSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0dca3a0111e3..b492b177e426 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -458,6 +458,7 @@ public class HMaster extends HBaseServerBase implements Maste private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier; private QuotaObserverChore quotaObserverChore; private SnapshotQuotaObserverChore snapshotQuotaChore; + private OldWALsDirSizeChore oldWALsDirSizeChore; private ProcedureExecutor procedureExecutor; private ProcedureStore procedureStore; @@ -1362,6 +1363,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE this.rollingUpgradeChore = new RollingUpgradeChore(this); getChoreService().scheduleChore(rollingUpgradeChore); + + this.oldWALsDirSizeChore = new OldWALsDirSizeChore(this); + getChoreService().scheduleChore(this.oldWALsDirSizeChore); + status.markComplete("Progress after master initialized complete"); } @@ -1894,6 +1899,7 @@ protected void stopChores() { shutdownChore(hbckChore); shutdownChore(regionsRecoveryChore); shutdownChore(rollingUpgradeChore); + shutdownChore(oldWALsDirSizeChore); } /** Returns Get remote side's InetAddress */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index aca04a8ac83a..a2c929cb79d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -89,6 +89,9 @@ public boolean accept(Path p) { // create the 
split log lock private final Lock splitLogLock = new ReentrantLock(); + // old WALs directory size in bytes + private long oldWALsDirSize; + /** * Superceded by {@link SplitWALManager}; i.e. procedure-based WAL splitting rather than 'classic' * zk-coordinated WAL splitting. @@ -114,6 +117,7 @@ public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, MasterS this.services = services; this.splitLogManager = new SplitLogManager(services, conf); this.oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + this.oldWALsDirSize = 0; } public void stop() { @@ -134,6 +138,14 @@ Path getOldLogDir() { return this.oldLogDir; } + public void updateOldWALsDirSize() throws IOException { + this.oldWALsDirSize = fs.getContentSummary(this.oldLogDir).getLength(); + } + + public long getOldWALsDirSize() { + return this.oldWALsDirSize; + } + public FileSystem getFileSystem() { return this.fs; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index 923c663807f1..ff6f5b8e5df8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -238,4 +238,12 @@ public PairOfSameType getRegionCounts() { return new PairOfSameType<>(0, 0); } } + + @Override + public long getOldWALsDirSize() { + if (master == null || !master.isInitialized()) { + return 0; + } + return master.getMasterWalManager().getOldWALsDirSize(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OldWALsDirSizeChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OldWALsDirSizeChore.java new file mode 100644 index 000000000000..b2f0622b7d28 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OldWALsDirSizeChore.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This chore is used to update the 'oldWALsDirSize' variable in {@link MasterWalManager} through + * the {@link MasterWalManager#updateOldWALsDirSize()} method. 
+ */ +@InterfaceAudience.Private +public class OldWALsDirSizeChore extends ScheduledChore { + private static final Logger LOG = LoggerFactory.getLogger(OldWALsDirSizeChore.class); + + private final MasterServices master; + + public OldWALsDirSizeChore(MasterServices master) { + super(master.getServerName() + "-OldWALsDirSizeChore", master, + master.getConfiguration().getInt(HConstants.HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD, + HConstants.DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD)); + this.master = master; + } + + @Override + protected void chore() { + try { + this.master.getMasterWalManager().updateOldWALsDirSize(); + } catch (IOException e) { + LOG.error("Got exception while trying to update the old WALs Directory size counter: " + + e.getMessage(), e); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index 3966b1f6bec9..09618b3d899e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -183,6 +183,8 @@ public void testDefaultMasterMetrics() throws Exception { metricsHelper.assertCounter(MetricsMasterSource.SERVER_CRASH_METRIC_PREFIX + "SubmittedCount", 0, masterSource); + metricsHelper.assertGauge("oldWALsDirSize", master.getMasterWalManager().getOldWALsDirSize(), + masterSource); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java index f73ebde89c11..d1389ee68e9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java @@ -85,6 +85,7 @@ public void testInfo() throws IOException { assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length); assertEquals(master.getServerManager().getOnlineServersList().size(), info.getNumRegionServers()); + assertEquals(master.getMasterWalManager().getOldWALsDirSize(), info.getOldWALsDirSize()); int regionServerCount = NUM_RS; assertEquals(regionServerCount, info.getNumRegionServers()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOldWALsDirSizeChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOldWALsDirSizeChore.java new file mode 100644 index 000000000000..7bd4ec5a1c24 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOldWALsDirSizeChore.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.master.assignment.MockMasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tests for OldWALsDirSizeChore Here we are using the {@link MockMasterServices} to mock the Hbase + * Master. Chore's won't be running automatically; we need to run every time. + */ +@Category({ MasterTests.class, SmallTests.class }) +public class TestOldWALsDirSizeChore { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestOldWALsDirSizeChore.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestOldWALsDirSizeChore.class); + + private MockMasterServices master; + + private static final HBaseTestingUtil HBASE_TESTING_UTILITY = new HBaseTestingUtil(); + + @Before + public void setUp() throws Exception { + master = new MockMasterServices(HBASE_TESTING_UTILITY.getConfiguration()); + master.start(10, null); + } + + @After + public void tearDown() throws Exception { + master.stop("tearDown"); + } + + @Test + public void testOldWALsDirSizeChore() throws IOException { + // Assume the OldWALs directory size is initially zero as the chore hasn't run yet + long currentOldWALsDirSize = master.getMasterWalManager().getOldWALsDirSize(); + assertEquals("Initial OldWALs directory size should be zero before running the chore", 0, + currentOldWALsDirSize); + + int dummyFileSize = 50 * 1024 * 1024; // 50MB + byte[] dummyData = new byte[dummyFileSize]; + + // Create a dummy file in the OldWALs directory + Path dummyFileInOldWALsDir = new Path(master.getMasterWalManager().getOldLogDir(), "dummy.txt"); + try (FSDataOutputStream outputStream = + master.getMasterWalManager().getFileSystem().create(dummyFileInOldWALsDir)) { + outputStream.write(dummyData); + } + + // Run the OldWALsDirSizeChore to update the directory size + OldWALsDirSizeChore oldWALsDirSizeChore = new OldWALsDirSizeChore(master); + oldWALsDirSizeChore.chore(); + + // Verify that the OldWALs directory size has increased by the file size + assertEquals("OldWALs directory size after chore should be as expected", dummyFileSize, + master.getMasterWalManager().getOldWALsDirSize()); + } +} diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc index 69fb4a0eae66..03391cc38b1a 100644 --- a/src/main/asciidoc/_chapters/hbase-default.adoc +++ b/src/main/asciidoc/_chapters/hbase-default.adoc @@ -761,6 +761,17 @@ Period at which the region balancer runs in the Master. `300000` +[[hbase.master.oldwals.dir.updater.period]] +*`hbase.master.oldwals.dir.updater.period`*:: ++ +.Description +Period at which the oldWALs directory size calculator/updater will run in the Master. 
+ ++ +.Default +`300000` + + [[hbase.regions.slop]] *`hbase.regions.slop`*:: + From 25e9228e2c0a9a752db02e48d55010e0197fd203 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 4 Dec 2023 09:25:09 -0500 Subject: [PATCH 162/514] HBASE-28215: region reopen procedure batching/throttling (#5534) Signed-off-by: Bryan Beaudreault --- .../procedure/ModifyTableProcedure.java | 14 +- .../ReopenTableRegionsProcedure.java | 151 ++++++++++-- ...openTableRegionsProcedureBatchBackoff.java | 103 +++++++++ ...stReopenTableRegionsProcedureBatching.java | 216 ++++++++++++++++++ 4 files changed, 458 insertions(+), 26 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatchBackoff.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index ff0d7d2cc94b..f7314349ee2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hbase.master.procedure; +import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT; +import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY; +import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_DISABLED; +import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY; + import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -26,6 +31,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ConcurrentTableModificationException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; @@ -219,7 +225,13 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: if (isTableEnabled(env)) { - addChildProcedure(new ReopenTableRegionsProcedure(getTableName())); + Configuration conf = env.getMasterConfiguration(); + long backoffMillis = conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, + PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT); + int batchSizeMax = + conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED); + addChildProcedure( + new ReopenTableRegionsProcedure(getTableName(), backoffMillis, batchSizeMax)); } setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java index 4efb1768b0ce..353636e6ddd6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java @@ -17,6 +17,7 @@ */ package 
org.apache.hadoop.hbase.master.procedure; +import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -53,6 +54,17 @@ public class ReopenTableRegionsProcedure private static final Logger LOG = LoggerFactory.getLogger(ReopenTableRegionsProcedure.class); + public static final String PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY = + "hbase.reopen.table.regions.progressive.batch.backoff.ms"; + public static final long PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT = 0L; + public static final String PROGRESSIVE_BATCH_SIZE_MAX_KEY = + "hbase.reopen.table.regions.progressive.batch.size.max"; + public static final int PROGRESSIVE_BATCH_SIZE_MAX_DISABLED = -1; + private static final int PROGRESSIVE_BATCH_SIZE_MAX_DEFAULT_VALUE = Integer.MAX_VALUE; + + // this minimum prevents a max which would break this procedure + private static final int MINIMUM_BATCH_SIZE_MAX = 1; + private TableName tableName; // Specify specific regions of a table to reopen. @@ -61,20 +73,46 @@ public class ReopenTableRegionsProcedure private List regions = Collections.emptyList(); + private List currentRegionBatch = Collections.emptyList(); + private RetryCounter retryCounter; + private long reopenBatchBackoffMillis; + private int reopenBatchSize; + private int reopenBatchSizeMax; + private long regionsReopened = 0; + private long batchesProcessed = 0; + public ReopenTableRegionsProcedure() { - regionNames = Collections.emptyList(); + this(null); } public ReopenTableRegionsProcedure(TableName tableName) { - this.tableName = tableName; - this.regionNames = Collections.emptyList(); + this(tableName, Collections.emptyList()); } public ReopenTableRegionsProcedure(final TableName tableName, final List regionNames) { + this(tableName, regionNames, PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT, + PROGRESSIVE_BATCH_SIZE_MAX_DISABLED); + } + + public ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis, + int reopenBatchSizeMax) { + this(tableName, Collections.emptyList(), reopenBatchBackoffMillis, reopenBatchSizeMax); + } + + public ReopenTableRegionsProcedure(final TableName tableName, final List regionNames, + long reopenBatchBackoffMillis, int reopenBatchSizeMax) { this.tableName = tableName; this.regionNames = regionNames; + this.reopenBatchBackoffMillis = reopenBatchBackoffMillis; + if (reopenBatchSizeMax == PROGRESSIVE_BATCH_SIZE_MAX_DISABLED) { + this.reopenBatchSize = Integer.MAX_VALUE; + this.reopenBatchSizeMax = Integer.MAX_VALUE; + } else { + this.reopenBatchSize = 1; + this.reopenBatchSizeMax = Math.max(reopenBatchSizeMax, MINIMUM_BATCH_SIZE_MAX); + } } @Override @@ -87,6 +125,30 @@ public TableOperationType getTableOperationType() { return TableOperationType.REGION_EDIT; } + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public long getRegionsReopened() { + return regionsReopened; + } + + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public long getBatchesProcessed() { + return batchesProcessed; + } + + @RestrictedApi(explanation = "Should only be called internally or in tests", link = "", + allowedOnPath = ".*(/src/test/.*|ReopenTableRegionsProcedure).java") + protected int progressBatchSize() { + int previousBatchSize = reopenBatchSize; + reopenBatchSize = Math.min(reopenBatchSizeMax, 2 * reopenBatchSize); + if (reopenBatchSize < previousBatchSize) { + // the batch size should never decrease. 
this must be overflow, so just use max + reopenBatchSize = reopenBatchSizeMax; + } + return reopenBatchSize; + } + private boolean canSchedule(MasterProcedureEnv env, HRegionLocation loc) { if (loc.getSeqNum() < 0) { return false; @@ -114,7 +176,13 @@ protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_REOPEN_REGIONS); return Flow.HAS_MORE_STATE; case REOPEN_TABLE_REGIONS_REOPEN_REGIONS: - for (HRegionLocation loc : regions) { + // if we didn't finish reopening the last batch yet, let's keep trying until we do. + // at that point, the batch will be empty and we can generate a new batch + if (!regions.isEmpty() && currentRegionBatch.isEmpty()) { + currentRegionBatch = regions.stream().limit(reopenBatchSize).collect(Collectors.toList()); + batchesProcessed++; + } + for (HRegionLocation loc : currentRegionBatch) { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getRegionStateNode(loc.getRegion()); // this possible, maybe the region has already been merged or split, see HBASE-20921 @@ -133,39 +201,72 @@ protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState regionNode.unlock(); } addChildProcedure(proc); + regionsReopened++; } setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_CONFIRM_REOPENED); return Flow.HAS_MORE_STATE; case REOPEN_TABLE_REGIONS_CONFIRM_REOPENED: - regions = regions.stream().map(env.getAssignmentManager().getRegionStates()::checkReopened) - .filter(l -> l != null).collect(Collectors.toList()); + // update region lists based on what's been reopened + regions = filterReopened(env, regions); + currentRegionBatch = filterReopened(env, currentRegionBatch); + + // existing batch didn't fully reopen, so try to resolve that first. + // since this is a retry, don't do the batch backoff + if (!currentRegionBatch.isEmpty()) { + return reopenIfSchedulable(env, currentRegionBatch, false); + } + if (regions.isEmpty()) { return Flow.NO_MORE_STATE; } - if (regions.stream().anyMatch(loc -> canSchedule(env, loc))) { - retryCounter = null; - setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_REOPEN_REGIONS); - return Flow.HAS_MORE_STATE; - } - // We can not schedule TRSP for all the regions need to reopen, wait for a while and retry - // again. 
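For reference, the two knobs introduced here are read by ModifyTableProcedure (shown above) and control how the reopen batches grow. A small sketch of the resulting behaviour, assuming an operator sets the max batch size to 8 with a non-zero backoff; the exact schedule depends on how many regions remain to be reopened:

    // ModifyTableProcedure passes the new settings to the reopen procedure
    long backoffMillis = conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
      PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT);   // default 0: no backoff between batches
    int batchSizeMax = conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY,
      PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);        // default -1: a single unlimited batch
    addChildProcedure(
      new ReopenTableRegionsProcedure(getTableName(), backoffMillis, batchSizeMax));

    // with batchSizeMax = 8 the procedure reopens regions in batches of 1, 2, 4, 8, 8, ...
    // (progressBatchSize() doubles the size up to the max), sleeping backoffMillis between
    // successfully reopened batches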
- if (retryCounter == null) { - retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); - } - long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.info( - "There are still {} region(s) which need to be reopened for table {} are in " - + "OPENING state, suspend {}secs and try again later", - regions.size(), tableName, backoff / 1000); - setTimeout(Math.toIntExact(backoff)); - setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); - skipPersistence(); - throw new ProcedureSuspendedException(); + + // current batch is finished, schedule more regions + return reopenIfSchedulable(env, regions, true); default: throw new UnsupportedOperationException("unhandled state=" + state); } } + private List filterReopened(MasterProcedureEnv env, + List regionsToCheck) { + return regionsToCheck.stream().map(env.getAssignmentManager().getRegionStates()::checkReopened) + .filter(l -> l != null).collect(Collectors.toList()); + } + + private Flow reopenIfSchedulable(MasterProcedureEnv env, List regionsToReopen, + boolean shouldBatchBackoff) throws ProcedureSuspendedException { + if (regionsToReopen.stream().anyMatch(loc -> canSchedule(env, loc))) { + retryCounter = null; + setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_REOPEN_REGIONS); + if (shouldBatchBackoff && reopenBatchBackoffMillis > 0) { + progressBatchSize(); + setBackoffState(reopenBatchBackoffMillis); + throw new ProcedureSuspendedException(); + } else { + return Flow.HAS_MORE_STATE; + } + } + + // We can not schedule TRSP for all the regions need to reopen, wait for a while and retry + // again. + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoffMillis = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.info( + "There are still {} region(s) which need to be reopened for table {}. {} are in " + + "OPENING state, suspend {}secs and try again later", + regions.size(), tableName, currentRegionBatch.size(), backoffMillis / 1000); + setBackoffState(backoffMillis); + throw new ProcedureSuspendedException(); + } + + private void setBackoffState(long millis) { + setTimeout(Math.toIntExact(millis)); + setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); + skipPersistence(); + } + private List getRegionLocationsForReopen(List tableRegionsForReopen) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatchBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatchBackoff.java new file mode 100644 index 000000000000..fbabb1fa22cc --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatchBackoff.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
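
The new PROGRESSIVE_BATCH_* keys and the (TableName, backoffMillis, batchSizeMax) constructor form the public surface of this change, but the hunk does not show where the keys are read. The following is therefore only a hedged sketch of how a call site could translate them into constructor arguments; the Configuration handle and the helper class are assumptions, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;

final class ReopenWithBackoffSketch {
  // Builds a batched reopen procedure from the new configuration keys.
  static ReopenTableRegionsProcedure create(Configuration conf, TableName tableName) {
    long backoffMs = conf.getLong(
      ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
      ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT);
    int batchSizeMax = conf.getInt(
      ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY,
      ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
    // With the defaults (-1 and 0 ms) this keeps the old behavior: one unbounded
    // batch and no suspension between batches.
    return new ReopenTableRegionsProcedure(tableName, backoffMs, batchSizeMax);
  }
}
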
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Confirm that we will rate limit reopen batches when reopening all table regions. This can avoid + * the pain associated with reopening too many regions at once. + */ +@Category({ MasterTests.class, MediumTests.class }) +public class TestReopenTableRegionsProcedureBatchBackoff { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureBatchBackoff.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableName TABLE_NAME = TableName.valueOf("BatchBackoff"); + private static final int BACKOFF_MILLIS_PER_RS = 3_000; + private static final int REOPEN_BATCH_SIZE = 1; + + private static byte[] CF = Bytes.toBytes("cf"); + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); + UTIL.startMiniCluster(1); + UTIL.createMultiRegionTable(TABLE_NAME, CF, 10); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testRegionBatchBackoff() throws IOException { + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + List regions = UTIL.getAdmin().getRegions(TABLE_NAME); + assertTrue(10 <= regions.size()); + ReopenTableRegionsProcedure proc = + new ReopenTableRegionsProcedure(TABLE_NAME, BACKOFF_MILLIS_PER_RS, REOPEN_BATCH_SIZE); + procExec.submitProcedure(proc); + Instant startedAt = Instant.now(); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60_000); + Instant stoppedAt = Instant.now(); + assertTrue(Duration.between(startedAt, stoppedAt).toMillis() + > (long) regions.size() * BACKOFF_MILLIS_PER_RS); + } + + @Test + public void testRegionBatchNoBackoff() throws IOException { + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + List regions = UTIL.getAdmin().getRegions(TABLE_NAME); + assertTrue(10 <= regions.size()); + int noBackoffMillis = 0; + ReopenTableRegionsProcedure proc = + new ReopenTableRegionsProcedure(TABLE_NAME, noBackoffMillis, REOPEN_BATCH_SIZE); + procExec.submitProcedure(proc); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, + (long) regions.size() * BACKOFF_MILLIS_PER_RS); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java new file mode 100644 index 000000000000..8ea9b3c6a309 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; + +/** + * Confirm that we will batch region reopens when reopening all table regions. This can avoid the + * pain associated with reopening too many regions at once. 
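
For the timing test above, the numbers work out as follows: a batch cap of 1 means the ten regions are reopened one batch at a time with a 3 second suspension between batches, so testRegionBatchBackoff asserts that the total runtime exceeds

  regions * backoff per batch = 10 * 3,000 ms = 30,000 ms (30 s),

while the 60 s passed to waitForProcedureToComplete leaves headroom for the reopen work itself; testRegionBatchNoBackoff only has to finish within that same 30 s budget.
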
+ */ +@Category({ MasterTests.class, MediumTests.class }) +public class TestReopenTableRegionsProcedureBatching { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureBatching.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static final int BACKOFF_MILLIS_PER_RS = 0; + private static final int REOPEN_BATCH_SIZE_MAX = 1; + + private static TableName TABLE_NAME = TableName.valueOf("Batching"); + + private static byte[] CF = Bytes.toBytes("cf"); + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); + UTIL.startMiniCluster(1); + UTIL.createMultiRegionTable(TABLE_NAME, CF); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testSmallMaxBatchSize() throws IOException { + AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + List regions = UTIL.getAdmin().getRegions(TABLE_NAME); + assertTrue(2 <= regions.size()); + Set stuckRegions = + regions.stream().map(r -> stickRegion(am, procExec, r)).collect(Collectors.toSet()); + ReopenTableRegionsProcedure proc = + new ReopenTableRegionsProcedure(TABLE_NAME, BACKOFF_MILLIS_PER_RS, REOPEN_BATCH_SIZE_MAX); + procExec.submitProcedure(proc); + UTIL.waitFor(10000, () -> proc.getState() == ProcedureState.WAITING_TIMEOUT); + + // the first batch should be small + confirmBatchSize(1, stuckRegions, proc); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60_000); + + // other batches should also be small + assertTrue(proc.getBatchesProcessed() >= regions.size()); + + // all regions should only be opened once + assertEquals(proc.getRegionsReopened(), regions.size()); + } + + @Test + public void testDefaultMaxBatchSize() throws IOException { + AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + List regions = UTIL.getAdmin().getRegions(TABLE_NAME); + assertTrue(2 <= regions.size()); + Set stuckRegions = + regions.stream().map(r -> stickRegion(am, procExec, r)).collect(Collectors.toSet()); + ReopenTableRegionsProcedure proc = new ReopenTableRegionsProcedure(TABLE_NAME); + procExec.submitProcedure(proc); + UTIL.waitFor(10000, () -> proc.getState() == ProcedureState.WAITING_TIMEOUT); + + // the first batch should be large + confirmBatchSize(regions.size(), stuckRegions, proc); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60_000); + + // all regions should only be opened once + assertEquals(proc.getRegionsReopened(), regions.size()); + } + + @Test + public void testNegativeBatchSizeDoesNotBreak() throws IOException { + AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + List regions = UTIL.getAdmin().getRegions(TABLE_NAME); + assertTrue(2 <= regions.size()); + Set stuckRegions = + regions.stream().map(r -> stickRegion(am, procExec, r)).collect(Collectors.toSet()); + ReopenTableRegionsProcedure proc = + new ReopenTableRegionsProcedure(TABLE_NAME, BACKOFF_MILLIS_PER_RS, -100); + procExec.submitProcedure(proc); + 
UTIL.waitFor(10000, () -> proc.getState() == ProcedureState.WAITING_TIMEOUT); + + // the first batch should be small + confirmBatchSize(1, stuckRegions, proc); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60_000); + + // other batches should also be small + assertTrue(proc.getBatchesProcessed() >= regions.size()); + + // all regions should only be opened once + assertEquals(proc.getRegionsReopened(), regions.size()); + } + + @Test + public void testBatchSizeDoesNotOverflow() { + ReopenTableRegionsProcedure proc = + new ReopenTableRegionsProcedure(TABLE_NAME, BACKOFF_MILLIS_PER_RS, Integer.MAX_VALUE); + int currentBatchSize = 1; + while (currentBatchSize < Integer.MAX_VALUE) { + currentBatchSize = proc.progressBatchSize(); + assertTrue(currentBatchSize > 0); + } + } + + private void confirmBatchSize(int expectedBatchSize, Set stuckRegions, + ReopenTableRegionsProcedure proc) { + while (true) { + if (proc.getBatchesProcessed() == 0) { + continue; + } + stuckRegions.forEach(this::unstickRegion); + UTIL.waitFor(5000, () -> expectedBatchSize == proc.getRegionsReopened()); + break; + } + } + + static class StuckRegion { + final TransitRegionStateProcedure trsp; + final RegionStateNode regionNode; + final long openSeqNum; + + public StuckRegion(TransitRegionStateProcedure trsp, RegionStateNode regionNode, + long openSeqNum) { + this.trsp = trsp; + this.regionNode = regionNode; + this.openSeqNum = openSeqNum; + } + } + + private StuckRegion stickRegion(AssignmentManager am, + ProcedureExecutor procExec, RegionInfo regionInfo) { + RegionStateNode regionNode = am.getRegionStates().getRegionStateNode(regionInfo); + TransitRegionStateProcedure trsp = + TransitRegionStateProcedure.unassign(procExec.getEnvironment(), regionInfo); + regionNode.lock(); + long openSeqNum; + try { + openSeqNum = regionNode.getOpenSeqNum(); + regionNode.setState(State.OPENING); + regionNode.setOpenSeqNum(-1L); + regionNode.setProcedure(trsp); + } finally { + regionNode.unlock(); + } + return new StuckRegion(trsp, regionNode, openSeqNum); + } + + private void unstickRegion(StuckRegion stuckRegion) { + stuckRegion.regionNode.lock(); + try { + stuckRegion.regionNode.setState(State.OPEN); + stuckRegion.regionNode.setOpenSeqNum(stuckRegion.openSeqNum); + stuckRegion.regionNode.unsetProcedure(stuckRegion.trsp); + } finally { + stuckRegion.regionNode.unlock(); + } + } +} From 6e421e9d94ac092e5381021926f203fc57b76f0a Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Wed, 6 Dec 2023 12:53:32 -0500 Subject: [PATCH 163/514] HBASE-28206 [JDK17] JVM crashes intermittently on aarch64 (#5561) Signed-off-by: Duo Zhang --- .../MetricsRegionServerWrapperImpl.java | 639 +++++++++--------- .../TestMetricsRegionServerAggregate.java | 227 +++++++ 2 files changed, 535 insertions(+), 331 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerAggregate.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 6c7fc504b5fd..ef0ee71f7fdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.OptionalDouble; -import java.util.OptionalLong; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -72,62 +71,16 @@ class MetricsRegionServerWrapperImpl implements MetricsRegionServerWrapper { private CacheStats cacheStats; private CacheStats l1Stats = null; private CacheStats l2Stats = null; - - private volatile long numStores = 0; private volatile long numWALFiles = 0; private volatile long walFileSize = 0; - private volatile long numStoreFiles = 0; - private volatile long memstoreSize = 0; - private volatile long onHeapMemstoreSize = 0; - private volatile long offHeapMemstoreSize = 0; - private volatile long storeFileSize = 0; - private volatile double storeFileSizeGrowthRate = 0; - private volatile long maxStoreFileCount = 0; - private volatile long maxStoreFileAge = 0; - private volatile long minStoreFileAge = 0; - private volatile long avgStoreFileAge = 0; - private volatile long numReferenceFiles = 0; - private volatile double requestsPerSecond = 0.0; - private volatile long readRequestsCount = 0; - private volatile double readRequestsRatePerSecond = 0; - private volatile long cpRequestsCount = 0; - private volatile long filteredReadRequestsCount = 0; - private volatile long writeRequestsCount = 0; - private volatile double writeRequestsRatePerSecond = 0; - private volatile long checkAndMutateChecksFailed = 0; - private volatile long checkAndMutateChecksPassed = 0; - private volatile long storefileIndexSize = 0; - private volatile long totalStaticIndexSize = 0; - private volatile long totalStaticBloomSize = 0; - private volatile long bloomFilterRequestsCount = 0; - private volatile long bloomFilterNegativeResultsCount = 0; - private volatile long bloomFilterEligibleRequestsCount = 0; - private volatile long numMutationsWithoutWAL = 0; - private volatile long dataInMemoryWithoutWAL = 0; - private volatile double percentFileLocal = 0; - private volatile double percentFileLocalSecondaryRegions = 0; - private volatile long flushedCellsCount = 0; - private volatile long compactedCellsCount = 0; - private volatile long majorCompactedCellsCount = 0; - private volatile long flushedCellsSize = 0; - private volatile long compactedCellsSize = 0; - private volatile long majorCompactedCellsSize = 0; - private volatile long cellsCountCompactedToMob = 0; - private volatile long cellsCountCompactedFromMob = 0; - private volatile long cellsSizeCompactedToMob = 0; - private volatile long cellsSizeCompactedFromMob = 0; - private volatile long mobFlushCount = 0; - private volatile long mobFlushedCellsCount = 0; - private volatile long mobFlushedCellsSize = 0; - private volatile long mobScanCellsCount = 0; - private volatile long mobScanCellsSize = 0; private volatile long mobFileCacheAccessCount = 0; private volatile long mobFileCacheMissCount = 0; private volatile double mobFileCacheHitRatio = 0; private volatile long mobFileCacheEvictedCount = 0; private volatile long mobFileCacheCount = 0; - private volatile long blockedRequestsCount = 0L; - private volatile long averageRegionSize = 0L; + + private volatile RegionMetricAggregate aggregate = new RegionMetricAggregate(null); + protected final Map> requestsCountCache = new ConcurrentHashMap>(); @@ -249,7 +202,7 @@ public long getTotalRequestCount() { @Override public long getTotalRowActionRequestCount() { - return readRequestsCount + writeRequestsCount; + return aggregate.readRequestsCount + aggregate.writeRequestsCount; } @Override @@ -462,7 +415,7 @@ public void forceRecompute() { @Override public long 
getNumStores() { - return numStores; + return aggregate.numStores; } @Override @@ -491,92 +444,92 @@ public long getNumWALSlowAppend() { @Override public long getNumStoreFiles() { - return numStoreFiles; + return aggregate.numStoreFiles; } @Override public long getMaxStoreFiles() { - return maxStoreFileCount; + return aggregate.maxStoreFileCount; } @Override public long getMaxStoreFileAge() { - return maxStoreFileAge; + return aggregate.maxStoreFileAge; } @Override public long getMinStoreFileAge() { - return minStoreFileAge; + return aggregate.minStoreFileAge; } @Override public long getAvgStoreFileAge() { - return avgStoreFileAge; + return aggregate.avgStoreFileAge; } @Override public long getNumReferenceFiles() { - return numReferenceFiles; + return aggregate.numReferenceFiles; } @Override public long getMemStoreSize() { - return memstoreSize; + return aggregate.memstoreSize; } @Override public long getOnHeapMemStoreSize() { - return onHeapMemstoreSize; + return aggregate.onHeapMemstoreSize; } @Override public long getOffHeapMemStoreSize() { - return offHeapMemstoreSize; + return aggregate.offHeapMemstoreSize; } @Override public long getStoreFileSize() { - return storeFileSize; + return aggregate.storeFileSize; } @Override public double getStoreFileSizeGrowthRate() { - return storeFileSizeGrowthRate; + return aggregate.storeFileSizeGrowthRate; } @Override public double getRequestsPerSecond() { - return requestsPerSecond; + return aggregate.requestsPerSecond; } @Override public long getReadRequestsCount() { - return readRequestsCount; + return aggregate.readRequestsCount; } @Override public long getCpRequestsCount() { - return cpRequestsCount; + return aggregate.cpRequestsCount; } @Override public double getReadRequestsRatePerSecond() { - return readRequestsRatePerSecond; + return aggregate.readRequestsRatePerSecond; } @Override public long getFilteredReadRequestsCount() { - return filteredReadRequestsCount; + return aggregate.filteredReadRequestsCount; } @Override public long getWriteRequestsCount() { - return writeRequestsCount; + return aggregate.writeRequestsCount; } @Override public double getWriteRequestsRatePerSecond() { - return writeRequestsRatePerSecond; + return aggregate.writeRequestsRatePerSecond; } @Override @@ -606,62 +559,62 @@ public long getRpcMutateRequestsCount() { @Override public long getCheckAndMutateChecksFailed() { - return checkAndMutateChecksFailed; + return aggregate.checkAndMutateChecksFailed; } @Override public long getCheckAndMutateChecksPassed() { - return checkAndMutateChecksPassed; + return aggregate.checkAndMutateChecksPassed; } @Override public long getStoreFileIndexSize() { - return storefileIndexSize; + return aggregate.storefileIndexSize; } @Override public long getTotalStaticIndexSize() { - return totalStaticIndexSize; + return aggregate.totalStaticIndexSize; } @Override public long getTotalStaticBloomSize() { - return totalStaticBloomSize; + return aggregate.totalStaticBloomSize; } @Override public long getBloomFilterRequestsCount() { - return bloomFilterRequestsCount; + return aggregate.bloomFilterRequestsCount; } @Override public long getBloomFilterNegativeResultsCount() { - return bloomFilterNegativeResultsCount; + return aggregate.bloomFilterNegativeResultsCount; } @Override public long getBloomFilterEligibleRequestsCount() { - return bloomFilterEligibleRequestsCount; + return aggregate.bloomFilterEligibleRequestsCount; } @Override public long getNumMutationsWithoutWAL() { - return numMutationsWithoutWAL; + return 
aggregate.numMutationsWithoutWAL; } @Override public long getDataInMemoryWithoutWAL() { - return dataInMemoryWithoutWAL; + return aggregate.dataInMemoryWithoutWAL; } @Override public double getPercentFileLocal() { - return percentFileLocal; + return aggregate.percentFileLocal; } @Override public double getPercentFileLocalSecondaryRegions() { - return percentFileLocalSecondaryRegions; + return aggregate.percentFileLocalSecondaryRegions; } @Override @@ -674,77 +627,77 @@ public long getUpdatesBlockedTime() { @Override public long getFlushedCellsCount() { - return flushedCellsCount; + return aggregate.flushedCellsCount; } @Override public long getCompactedCellsCount() { - return compactedCellsCount; + return aggregate.compactedCellsCount; } @Override public long getMajorCompactedCellsCount() { - return majorCompactedCellsCount; + return aggregate.majorCompactedCellsCount; } @Override public long getFlushedCellsSize() { - return flushedCellsSize; + return aggregate.flushedCellsSize; } @Override public long getCompactedCellsSize() { - return compactedCellsSize; + return aggregate.compactedCellsSize; } @Override public long getMajorCompactedCellsSize() { - return majorCompactedCellsSize; + return aggregate.majorCompactedCellsSize; } @Override public long getCellsCountCompactedFromMob() { - return cellsCountCompactedFromMob; + return aggregate.cellsCountCompactedFromMob; } @Override public long getCellsCountCompactedToMob() { - return cellsCountCompactedToMob; + return aggregate.cellsCountCompactedToMob; } @Override public long getCellsSizeCompactedFromMob() { - return cellsSizeCompactedFromMob; + return aggregate.cellsSizeCompactedFromMob; } @Override public long getCellsSizeCompactedToMob() { - return cellsSizeCompactedToMob; + return aggregate.cellsSizeCompactedToMob; } @Override public long getMobFlushCount() { - return mobFlushCount; + return aggregate.mobFlushCount; } @Override public long getMobFlushedCellsCount() { - return mobFlushedCellsCount; + return aggregate.mobFlushedCellsCount; } @Override public long getMobFlushedCellsSize() { - return mobFlushedCellsSize; + return aggregate.mobFlushedCellsSize; } @Override public long getMobScanCellsCount() { - return mobScanCellsCount; + return aggregate.mobScanCellsCount; } @Override public long getMobScanCellsSize() { - return mobScanCellsSize; + return aggregate.mobScanCellsSize; } @Override @@ -777,6 +730,247 @@ public int getActiveScanners() { return regionServer.getRpcServices().getScannersCount(); } + private static final class RegionMetricAggregate { + private long numStores = 0; + private long numStoreFiles = 0; + private long memstoreSize = 0; + private long onHeapMemstoreSize = 0; + private long offHeapMemstoreSize = 0; + private long storeFileSize = 0; + private double storeFileSizeGrowthRate = 0; + private long maxStoreFileCount = 0; + private long maxStoreFileAge = 0; + private long minStoreFileAge = Long.MAX_VALUE; + private long avgStoreFileAge = 0; + private long numReferenceFiles = 0; + + private long cpRequestsCount = 0; + private double requestsPerSecond = 0.0; + private long readRequestsCount = 0; + private double readRequestsRatePerSecond = 0; + private long filteredReadRequestsCount = 0; + private long writeRequestsCount = 0; + private double writeRequestsRatePerSecond = 0; + private long checkAndMutateChecksFailed = 0; + private long checkAndMutateChecksPassed = 0; + private long storefileIndexSize = 0; + private long totalStaticIndexSize = 0; + private long totalStaticBloomSize = 0; + private long 
bloomFilterRequestsCount = 0; + private long bloomFilterNegativeResultsCount = 0; + private long bloomFilterEligibleRequestsCount = 0; + private long numMutationsWithoutWAL = 0; + private long dataInMemoryWithoutWAL = 0; + private double percentFileLocal = 0; + private double percentFileLocalSecondaryRegions = 0; + private long flushedCellsCount = 0; + private long compactedCellsCount = 0; + private long majorCompactedCellsCount = 0; + private long flushedCellsSize = 0; + private long compactedCellsSize = 0; + private long majorCompactedCellsSize = 0; + private long cellsCountCompactedToMob = 0; + private long cellsCountCompactedFromMob = 0; + private long cellsSizeCompactedToMob = 0; + private long cellsSizeCompactedFromMob = 0; + private long mobFlushCount = 0; + private long mobFlushedCellsCount = 0; + private long mobFlushedCellsSize = 0; + private long mobScanCellsCount = 0; + private long mobScanCellsSize = 0; + private long blockedRequestsCount = 0L; + private long averageRegionSize = 0L; + private long totalReadRequestsDelta = 0; + private long totalWriteRequestsDelta = 0; + + private RegionMetricAggregate(RegionMetricAggregate other) { + if (other != null) { + requestsPerSecond = other.requestsPerSecond; + readRequestsRatePerSecond = other.readRequestsRatePerSecond; + writeRequestsRatePerSecond = other.writeRequestsRatePerSecond; + } + } + + private void aggregate(HRegionServer regionServer, + Map> requestsCountCache) { + HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); + HDFSBlocksDistribution hdfsBlocksDistributionSecondaryRegions = new HDFSBlocksDistribution(); + + long avgAgeNumerator = 0; + long numHFiles = 0; + int regionCount = 0; + + for (HRegion r : regionServer.getOnlineRegionsLocalContext()) { + Deltas deltas = calculateReadWriteDeltas(r, requestsCountCache); + totalReadRequestsDelta += deltas.readRequestsCountDelta; + totalWriteRequestsDelta += deltas.writeRequestsCountDelta; + + numMutationsWithoutWAL += r.getNumMutationsWithoutWAL(); + dataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL(); + cpRequestsCount += r.getCpRequestsCount(); + readRequestsCount += r.getReadRequestsCount(); + filteredReadRequestsCount += r.getFilteredReadRequestsCount(); + writeRequestsCount += r.getWriteRequestsCount(); + checkAndMutateChecksFailed += r.getCheckAndMutateChecksFailed(); + checkAndMutateChecksPassed += r.getCheckAndMutateChecksPassed(); + blockedRequestsCount += r.getBlockedRequestsCount(); + + StoreFileStats storeFileStats = aggregateStores(r.getStores()); + numHFiles += storeFileStats.numHFiles; + avgAgeNumerator += storeFileStats.avgAgeNumerator; + + HDFSBlocksDistribution distro = r.getHDFSBlocksDistribution(); + hdfsBlocksDistribution.add(distro); + if (r.getRegionInfo().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + hdfsBlocksDistributionSecondaryRegions.add(distro); + } + + regionCount++; + } + + float localityIndex = + hdfsBlocksDistribution.getBlockLocalityIndex(regionServer.getServerName().getHostname()); + percentFileLocal = Double.isNaN(localityIndex) ? 0 : (localityIndex * 100); + + float localityIndexSecondaryRegions = hdfsBlocksDistributionSecondaryRegions + .getBlockLocalityIndex(regionServer.getServerName().getHostname()); + percentFileLocalSecondaryRegions = + Double.isNaN(localityIndexSecondaryRegions) ? 
0 : (localityIndexSecondaryRegions * 100); + + if (regionCount > 0) { + averageRegionSize = (memstoreSize + storeFileSize) / regionCount; + } + + // if there were no store files, we'll never have updated this with Math.min + // so set it to 0, which is a better value to display in case of no storefiles + if (minStoreFileAge == Long.MAX_VALUE) { + this.minStoreFileAge = 0; + } + + if (numHFiles != 0) { + avgStoreFileAge = avgAgeNumerator / numHFiles; + } + } + + private static final class Deltas { + private final long readRequestsCountDelta; + private final long writeRequestsCountDelta; + + private Deltas(long readRequestsCountDelta, long writeRequestsCountDelta) { + this.readRequestsCountDelta = readRequestsCountDelta; + this.writeRequestsCountDelta = writeRequestsCountDelta; + } + } + + private Deltas calculateReadWriteDeltas(HRegion r, + Map> requestsCountCache) { + String encodedRegionName = r.getRegionInfo().getEncodedName(); + long currentReadRequestsCount = r.getReadRequestsCount(); + long currentWriteRequestsCount = r.getWriteRequestsCount(); + if (requestsCountCache.containsKey(encodedRegionName)) { + long lastReadRequestsCount = requestsCountCache.get(encodedRegionName).get(0); + long lastWriteRequestsCount = requestsCountCache.get(encodedRegionName).get(1); + + // Update cache for our next comparison + requestsCountCache.get(encodedRegionName).set(0, currentReadRequestsCount); + requestsCountCache.get(encodedRegionName).set(1, currentWriteRequestsCount); + + long readRequestsDelta = currentReadRequestsCount - lastReadRequestsCount; + long writeRequestsDelta = currentWriteRequestsCount - lastWriteRequestsCount; + return new Deltas(readRequestsDelta, writeRequestsDelta); + } else { + // List[0] -> readRequestCount + // List[1] -> writeRequestCount + ArrayList requests = new ArrayList(2); + requests.add(currentReadRequestsCount); + requests.add(currentWriteRequestsCount); + requestsCountCache.put(encodedRegionName, requests); + return new Deltas(currentReadRequestsCount, currentWriteRequestsCount); + } + } + + public void updateRates(long timeSinceLastRun, long expectedPeriod, long lastStoreFileSize) { + requestsPerSecond = + (totalReadRequestsDelta + totalWriteRequestsDelta) / (timeSinceLastRun / 1000.0); + + double readRequestsRatePerMilliSecond = (double) totalReadRequestsDelta / expectedPeriod; + double writeRequestsRatePerMilliSecond = (double) totalWriteRequestsDelta / expectedPeriod; + + readRequestsRatePerSecond = readRequestsRatePerMilliSecond * 1000.0; + writeRequestsRatePerSecond = writeRequestsRatePerMilliSecond * 1000.0; + + long intervalStoreFileSize = storeFileSize - lastStoreFileSize; + storeFileSizeGrowthRate = (double) intervalStoreFileSize * 1000.0 / expectedPeriod; + } + + private static final class StoreFileStats { + private final long numHFiles; + private final long avgAgeNumerator; + + private StoreFileStats(long numHFiles, long avgAgeNumerator) { + this.numHFiles = numHFiles; + this.avgAgeNumerator = avgAgeNumerator; + } + } + + private StoreFileStats aggregateStores(List stores) { + numStores += stores.size(); + long numHFiles = 0; + long avgAgeNumerator = 0; + for (Store store : stores) { + numStoreFiles += store.getStorefilesCount(); + memstoreSize += store.getMemStoreSize().getDataSize(); + onHeapMemstoreSize += store.getMemStoreSize().getHeapSize(); + offHeapMemstoreSize += store.getMemStoreSize().getOffHeapSize(); + storeFileSize += store.getStorefilesSize(); + maxStoreFileCount = Math.max(maxStoreFileCount, store.getStorefilesCount()); + + 
maxStoreFileAge = + Math.max(store.getMaxStoreFileAge().orElse(maxStoreFileAge), maxStoreFileAge); + minStoreFileAge = + Math.min(store.getMinStoreFileAge().orElse(minStoreFileAge), minStoreFileAge); + + long storeHFiles = store.getNumHFiles(); + numHFiles += storeHFiles; + numReferenceFiles += store.getNumReferenceFiles(); + + OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); + if (storeAvgStoreFileAge.isPresent()) { + avgAgeNumerator = + (long) (avgAgeNumerator + storeAvgStoreFileAge.getAsDouble() * storeHFiles); + } + + storefileIndexSize += store.getStorefilesRootLevelIndexSize(); + totalStaticBloomSize += store.getTotalStaticBloomSize(); + totalStaticIndexSize += store.getTotalStaticIndexSize(); + bloomFilterRequestsCount += store.getBloomFilterRequestsCount(); + bloomFilterNegativeResultsCount += store.getBloomFilterNegativeResultsCount(); + bloomFilterEligibleRequestsCount += store.getBloomFilterEligibleRequestsCount(); + flushedCellsCount += store.getFlushedCellsCount(); + compactedCellsCount += store.getCompactedCellsCount(); + majorCompactedCellsCount += store.getMajorCompactedCellsCount(); + flushedCellsSize += store.getFlushedCellsSize(); + compactedCellsSize += store.getCompactedCellsSize(); + majorCompactedCellsSize += store.getMajorCompactedCellsSize(); + if (store instanceof HMobStore) { + HMobStore mobStore = (HMobStore) store; + cellsCountCompactedToMob += mobStore.getCellsCountCompactedToMob(); + cellsCountCompactedFromMob += mobStore.getCellsCountCompactedFromMob(); + cellsSizeCompactedToMob += mobStore.getCellsSizeCompactedToMob(); + cellsSizeCompactedFromMob += mobStore.getCellsSizeCompactedFromMob(); + mobFlushCount += mobStore.getMobFlushCount(); + mobFlushedCellsCount += mobStore.getMobFlushedCellsCount(); + mobFlushedCellsSize += mobStore.getMobFlushedCellsSize(); + mobScanCellsCount += mobStore.getMobScanCellsCount(); + mobScanCellsSize += mobStore.getMobScanCellsSize(); + } + } + + return new StoreFileStats(numHFiles, avgAgeNumerator); + } + + } + /** * This is the runnable that will be executed on the executor every PERIOD number of seconds It * will take metrics/numbers from all of the regions and use them to compute point in time @@ -790,170 +984,8 @@ public class RegionServerMetricsWrapperRunnable implements Runnable { @Override synchronized public void run() { try { - HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - HDFSBlocksDistribution hdfsBlocksDistributionSecondaryRegions = - new HDFSBlocksDistribution(); - - long tempNumStores = 0, tempNumStoreFiles = 0, tempStoreFileSize = 0; - long tempMemstoreSize = 0, tempOnHeapMemstoreSize = 0, tempOffHeapMemstoreSize = 0; - long tempMaxStoreFileAge = 0, tempNumReferenceFiles = 0; - long tempMaxStoreFileCount = 0; - long avgAgeNumerator = 0, numHFiles = 0; - long tempMinStoreFileAge = Long.MAX_VALUE; - long tempFilteredReadRequestsCount = 0, tempCpRequestsCount = 0; - long tempCheckAndMutateChecksFailed = 0; - long tempCheckAndMutateChecksPassed = 0; - long tempStorefileIndexSize = 0; - long tempTotalStaticIndexSize = 0; - long tempTotalStaticBloomSize = 0; - long tempBloomFilterRequestsCount = 0; - long tempBloomFilterNegativeResultsCount = 0; - long tempBloomFilterEligibleRequestsCount = 0; - long tempNumMutationsWithoutWAL = 0; - long tempDataInMemoryWithoutWAL = 0; - double tempPercentFileLocal = 0; - double tempPercentFileLocalSecondaryRegions = 0; - long tempFlushedCellsCount = 0; - long tempCompactedCellsCount = 0; - long tempMajorCompactedCellsCount = 0; - 
long tempFlushedCellsSize = 0; - long tempCompactedCellsSize = 0; - long tempMajorCompactedCellsSize = 0; - long tempCellsCountCompactedToMob = 0; - long tempCellsCountCompactedFromMob = 0; - long tempCellsSizeCompactedToMob = 0; - long tempCellsSizeCompactedFromMob = 0; - long tempMobFlushCount = 0; - long tempMobFlushedCellsCount = 0; - long tempMobFlushedCellsSize = 0; - long tempMobScanCellsCount = 0; - long tempMobScanCellsSize = 0; - long tempBlockedRequestsCount = 0; - int regionCount = 0; - - long tempReadRequestsCount = 0; - long tempWriteRequestsCount = 0; - long currentReadRequestsCount = 0; - long currentWriteRequestsCount = 0; - long lastReadRequestsCount = 0; - long lastWriteRequestsCount = 0; - long readRequestsDelta = 0; - long writeRequestsDelta = 0; - long totalReadRequestsDelta = 0; - long totalWriteRequestsDelta = 0; - String encodedRegionName; - for (HRegion r : regionServer.getOnlineRegionsLocalContext()) { - encodedRegionName = r.getRegionInfo().getEncodedName(); - currentReadRequestsCount = r.getReadRequestsCount(); - currentWriteRequestsCount = r.getWriteRequestsCount(); - if (requestsCountCache.containsKey(encodedRegionName)) { - lastReadRequestsCount = requestsCountCache.get(encodedRegionName).get(0); - lastWriteRequestsCount = requestsCountCache.get(encodedRegionName).get(1); - readRequestsDelta = currentReadRequestsCount - lastReadRequestsCount; - writeRequestsDelta = currentWriteRequestsCount - lastWriteRequestsCount; - totalReadRequestsDelta += readRequestsDelta; - totalWriteRequestsDelta += writeRequestsDelta; - // Update cache for our next comparision - requestsCountCache.get(encodedRegionName).set(0, currentReadRequestsCount); - requestsCountCache.get(encodedRegionName).set(1, currentWriteRequestsCount); - } else { - // List[0] -> readRequestCount - // List[1] -> writeRequestCount - ArrayList requests = new ArrayList(2); - requests.add(currentReadRequestsCount); - requests.add(currentWriteRequestsCount); - requestsCountCache.put(encodedRegionName, requests); - totalReadRequestsDelta += currentReadRequestsCount; - totalWriteRequestsDelta += currentWriteRequestsCount; - } - tempReadRequestsCount += r.getReadRequestsCount(); - tempWriteRequestsCount += r.getWriteRequestsCount(); - tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL(); - tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL(); - tempCpRequestsCount += r.getCpRequestsCount(); - tempFilteredReadRequestsCount += r.getFilteredReadRequestsCount(); - tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed(); - tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed(); - tempBlockedRequestsCount += r.getBlockedRequestsCount(); - List storeList = r.getStores(); - tempNumStores += storeList.size(); - for (Store store : storeList) { - tempNumStoreFiles += store.getStorefilesCount(); - tempMemstoreSize += store.getMemStoreSize().getDataSize(); - tempOnHeapMemstoreSize += store.getMemStoreSize().getHeapSize(); - tempOffHeapMemstoreSize += store.getMemStoreSize().getOffHeapSize(); - tempStoreFileSize += store.getStorefilesSize(); - - tempMaxStoreFileCount = Math.max(tempMaxStoreFileCount, store.getStorefilesCount()); - - OptionalLong storeMaxStoreFileAge = store.getMaxStoreFileAge(); - if ( - storeMaxStoreFileAge.isPresent() - && storeMaxStoreFileAge.getAsLong() > tempMaxStoreFileAge - ) { - tempMaxStoreFileAge = storeMaxStoreFileAge.getAsLong(); - } - - OptionalLong storeMinStoreFileAge = store.getMinStoreFileAge(); - if ( - storeMinStoreFileAge.isPresent() - && 
storeMinStoreFileAge.getAsLong() < tempMinStoreFileAge - ) { - tempMinStoreFileAge = storeMinStoreFileAge.getAsLong(); - } - - long storeHFiles = store.getNumHFiles(); - numHFiles += storeHFiles; - tempNumReferenceFiles += store.getNumReferenceFiles(); - - OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); - if (storeAvgStoreFileAge.isPresent()) { - avgAgeNumerator = - (long) (avgAgeNumerator + storeAvgStoreFileAge.getAsDouble() * storeHFiles); - } - - tempStorefileIndexSize += store.getStorefilesRootLevelIndexSize(); - tempTotalStaticBloomSize += store.getTotalStaticBloomSize(); - tempTotalStaticIndexSize += store.getTotalStaticIndexSize(); - tempBloomFilterRequestsCount += store.getBloomFilterRequestsCount(); - tempBloomFilterNegativeResultsCount += store.getBloomFilterNegativeResultsCount(); - tempBloomFilterEligibleRequestsCount += store.getBloomFilterEligibleRequestsCount(); - tempFlushedCellsCount += store.getFlushedCellsCount(); - tempCompactedCellsCount += store.getCompactedCellsCount(); - tempMajorCompactedCellsCount += store.getMajorCompactedCellsCount(); - tempFlushedCellsSize += store.getFlushedCellsSize(); - tempCompactedCellsSize += store.getCompactedCellsSize(); - tempMajorCompactedCellsSize += store.getMajorCompactedCellsSize(); - if (store instanceof HMobStore) { - HMobStore mobStore = (HMobStore) store; - tempCellsCountCompactedToMob += mobStore.getCellsCountCompactedToMob(); - tempCellsCountCompactedFromMob += mobStore.getCellsCountCompactedFromMob(); - tempCellsSizeCompactedToMob += mobStore.getCellsSizeCompactedToMob(); - tempCellsSizeCompactedFromMob += mobStore.getCellsSizeCompactedFromMob(); - tempMobFlushCount += mobStore.getMobFlushCount(); - tempMobFlushedCellsCount += mobStore.getMobFlushedCellsCount(); - tempMobFlushedCellsSize += mobStore.getMobFlushedCellsSize(); - tempMobScanCellsCount += mobStore.getMobScanCellsCount(); - tempMobScanCellsSize += mobStore.getMobScanCellsSize(); - } - } - - HDFSBlocksDistribution distro = r.getHDFSBlocksDistribution(); - hdfsBlocksDistribution.add(distro); - if (r.getRegionInfo().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - hdfsBlocksDistributionSecondaryRegions.add(distro); - } - regionCount++; - } - - float localityIndex = - hdfsBlocksDistribution.getBlockLocalityIndex(regionServer.getServerName().getHostname()); - tempPercentFileLocal = Double.isNaN(tempBlockedRequestsCount) ? 0 : (localityIndex * 100); - - float localityIndexSecondaryRegions = hdfsBlocksDistributionSecondaryRegions - .getBlockLocalityIndex(regionServer.getServerName().getHostname()); - tempPercentFileLocalSecondaryRegions = - Double.isNaN(localityIndexSecondaryRegions) ? 0 : (localityIndexSecondaryRegions * 100); + RegionMetricAggregate newVal = new RegionMetricAggregate(aggregate); + newVal.aggregate(regionServer, requestsCountCache); // Compute the number of requests per second long currentTime = EnvironmentEdgeManager.currentTime(); @@ -963,24 +995,14 @@ synchronized public void run() { if (lastRan == 0) { lastRan = currentTime - period; } - // If we've time traveled keep the last requests per second. 
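
The rate formulas, both in the inline code removed here and in the new updateRates() helper, are easier to follow with concrete numbers. A self-contained sketch using the same arithmetic; the names loosely mirror the patch, but the input values are invented for illustration:

// Worked example of the rate math: requests-per-second divides by the time that
// actually elapsed since the last run, while the read/write rates and the store
// file growth rate divide by the configured metrics period.
public class RateMathSketch {
  public static void main(String[] args) {
    long readDelta = 1_200;                       // read requests since the last run
    long writeDelta = 300;                        // write requests since the last run
    long timeSinceLastRun = 5_000;                // ms actually elapsed
    long period = 5_000;                          // configured period, in ms
    long storeFileSizeDelta = 50L * 1024 * 1024;  // bytes of store file growth

    double requestsPerSecond = (readDelta + writeDelta) / (timeSinceLastRun / 1000.0);
    double readRequestsRatePerSecond = (double) readDelta / period * 1000.0;
    double writeRequestsRatePerSecond = (double) writeDelta / period * 1000.0;
    double storeFileSizeGrowthRate = (double) storeFileSizeDelta * 1000.0 / period;

    System.out.println(requestsPerSecond);          // 300.0
    System.out.println(readRequestsRatePerSecond);  // 240.0
    System.out.println(writeRequestsRatePerSecond); // 60.0
    System.out.println(storeFileSizeGrowthRate);    // 1.048576E7 (10 MiB per second)
  }
}
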
- if ((currentTime - lastRan) > 0) { - requestsPerSecond = - (totalReadRequestsDelta + totalWriteRequestsDelta) / ((currentTime - lastRan) / 1000.0); - - double readRequestsRatePerMilliSecond = (double) totalReadRequestsDelta / period; - double writeRequestsRatePerMilliSecond = (double) totalWriteRequestsDelta / period; - - readRequestsRatePerSecond = readRequestsRatePerMilliSecond * 1000.0; - writeRequestsRatePerSecond = writeRequestsRatePerMilliSecond * 1000.0; - - long intervalStoreFileSize = tempStoreFileSize - lastStoreFileSize; - storeFileSizeGrowthRate = (double) intervalStoreFileSize * 1000.0 / period; - lastStoreFileSize = tempStoreFileSize; + long timeSinceLastRun = currentTime - lastRan; + // If we've time traveled keep the last requests per second. + if (timeSinceLastRun > 0) { + newVal.updateRates(timeSinceLastRun, period, lastStoreFileSize); } - lastRan = currentTime; + aggregate = newVal; List providers = regionServer.getWalFactory().getAllWALProviders(); for (WALProvider provider : providers) { @@ -988,58 +1010,6 @@ synchronized public void run() { walFileSize += provider.getLogFileSize(); } - // Copy over computed values so that no thread sees half computed values. - numStores = tempNumStores; - numStoreFiles = tempNumStoreFiles; - memstoreSize = tempMemstoreSize; - onHeapMemstoreSize = tempOnHeapMemstoreSize; - offHeapMemstoreSize = tempOffHeapMemstoreSize; - storeFileSize = tempStoreFileSize; - maxStoreFileCount = tempMaxStoreFileCount; - maxStoreFileAge = tempMaxStoreFileAge; - if (regionCount > 0) { - averageRegionSize = (memstoreSize + storeFileSize) / regionCount; - } - if (tempMinStoreFileAge != Long.MAX_VALUE) { - minStoreFileAge = tempMinStoreFileAge; - } - - if (numHFiles != 0) { - avgStoreFileAge = avgAgeNumerator / numHFiles; - } - - numReferenceFiles = tempNumReferenceFiles; - readRequestsCount = tempReadRequestsCount; - cpRequestsCount = tempCpRequestsCount; - filteredReadRequestsCount = tempFilteredReadRequestsCount; - writeRequestsCount = tempWriteRequestsCount; - checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed; - checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed; - storefileIndexSize = tempStorefileIndexSize; - totalStaticIndexSize = tempTotalStaticIndexSize; - totalStaticBloomSize = tempTotalStaticBloomSize; - bloomFilterRequestsCount = tempBloomFilterRequestsCount; - bloomFilterNegativeResultsCount = tempBloomFilterNegativeResultsCount; - bloomFilterEligibleRequestsCount = tempBloomFilterEligibleRequestsCount; - numMutationsWithoutWAL = tempNumMutationsWithoutWAL; - dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL; - percentFileLocal = tempPercentFileLocal; - percentFileLocalSecondaryRegions = tempPercentFileLocalSecondaryRegions; - flushedCellsCount = tempFlushedCellsCount; - compactedCellsCount = tempCompactedCellsCount; - majorCompactedCellsCount = tempMajorCompactedCellsCount; - flushedCellsSize = tempFlushedCellsSize; - compactedCellsSize = tempCompactedCellsSize; - majorCompactedCellsSize = tempMajorCompactedCellsSize; - cellsCountCompactedToMob = tempCellsCountCompactedToMob; - cellsCountCompactedFromMob = tempCellsCountCompactedFromMob; - cellsSizeCompactedToMob = tempCellsSizeCompactedToMob; - cellsSizeCompactedFromMob = tempCellsSizeCompactedFromMob; - mobFlushCount = tempMobFlushCount; - mobFlushedCellsCount = tempMobFlushedCellsCount; - mobFlushedCellsSize = tempMobFlushedCellsSize; - mobScanCellsCount = tempMobScanCellsCount; - mobScanCellsSize = tempMobScanCellsSize; mobFileCacheAccessCount = mobFileCache != null ? 
mobFileCache.getAccessCount() : 0L; mobFileCacheMissCount = mobFileCache != null ? mobFileCache.getMissCount() : 0L; mobFileCacheHitRatio = mobFileCache != null ? mobFileCache.getHitRatio() : 0.0; @@ -1048,7 +1018,9 @@ synchronized public void run() { } mobFileCacheEvictedCount = mobFileCache != null ? mobFileCache.getEvictedFileCount() : 0L; mobFileCacheCount = mobFileCache != null ? mobFileCache.getCacheSize() : 0; - blockedRequestsCount = tempBlockedRequestsCount; + + lastStoreFileSize = aggregate.storeFileSize; + lastRan = currentTime; } catch (Throwable e) { LOG.warn("Caught exception! Will suppress and retry.", e); } @@ -1094,12 +1066,12 @@ public long getZeroCopyBytesRead() { @Override public long getBlockedRequestsCount() { - return blockedRequestsCount; + return aggregate.blockedRequestsCount; } @Override public long getAverageRegionSize() { - return averageRegionSize; + return aggregate.averageRegionSize; } @Override @@ -1226,4 +1198,9 @@ public long getByteBuffAllocatorTotalBufferCount() { public long getByteBuffAllocatorUsedBufferCount() { return this.allocator.getUsedBufferCount(); } + + // Visible for testing + long getPeriod() { + return period; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerAggregate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerAggregate.java new file mode 100644 index 000000000000..428416833875 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerAggregate.java @@ -0,0 +1,227 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
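
The run() rewrite above now builds a fresh RegionMetricAggregate each interval and publishes it with the single volatile write aggregate = newVal, instead of copying dozens of values into individual volatile fields. A minimal sketch of that publication idiom, with hypothetical field names rather than the wrapper's real metrics:

// Sketch: fill in a private snapshot object first, then publish it through one
// volatile reference write; getters read whole-run values via that reference.
public class SnapshotPublishSketch {
  private static final class Snapshot {
    long numStores;
    long readRequestsCount;
  }

  private volatile Snapshot snapshot = new Snapshot();

  void recompute(long stores, long reads) {
    Snapshot next = new Snapshot(); // not visible to readers until fully populated
    next.numStores = stores;
    next.readRequestsCount = reads;
    snapshot = next;                // the single write that publishes the new run
  }

  long getNumStores() {
    return snapshot.numStores;
  }

  long getReadRequestsCount() {
    return snapshot.readRequestsCount;
  }
}

Because the snapshot object is never modified after that write, a reader always sees values from a completed run rather than a half-computed one.
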
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.OptionalDouble; +import java.util.OptionalLong; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category({ SmallTests.class, RegionServerTests.class }) +public class TestMetricsRegionServerAggregate { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMetricsRegionServerAggregate.class); + + @Test + public void test() { + AtomicInteger retVal = new AtomicInteger(0); + Answer defaultAnswer = invocation -> { + Class returnType = invocation.getMethod().getReturnType(); + + if (returnType.equals(Integer.TYPE) || returnType.equals(Integer.class)) { + return retVal.get(); + } else if (returnType.equals(Long.TYPE) || returnType.equals(Long.class)) { + return (long) retVal.get(); + } + return Mockito.RETURNS_DEFAULTS.answer(invocation); + }; + + ServerName serverName = mock(ServerName.class); + when(serverName.getHostname()).thenReturn("foo"); + WALFactory walFactory = mock(WALFactory.class); + RpcServerInterface rpcServer = mock(RpcServerInterface.class); + AtomicInteger storeFileCount = new AtomicInteger(1); + HRegion regionOne = getMockedRegion(defaultAnswer, "a", "foo", true, storeFileCount); + HRegion regionTwo = getMockedRegion(defaultAnswer, "b", "bar", true, storeFileCount); + HRegion regionThree = getMockedRegion(defaultAnswer, "c", "foo", false, storeFileCount); + HRegion regionFour = getMockedRegion(defaultAnswer, "d", "bar", false, storeFileCount); + List regions = Lists.newArrayList(regionOne, regionTwo, regionThree, regionFour); + + int numStoresPerRegion = 2; + for (HRegion region : regions) { + // if adding more stores, update numStoresPerRegion so that tests below continue working + assertEquals(numStoresPerRegion, region.getStores().size()); + } + + HRegionServer regionServer = mock(HRegionServer.class, defaultAnswer); + when(regionServer.getWalFactory()).thenReturn(walFactory); + when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); + when(regionServer.getServerName()).thenReturn(serverName); + Configuration conf = HBaseConfiguration.create(); + int metricsPeriodSec = 600; + // set a very long period so that it doesn't actually run during our very quick test + conf.setLong(HConstants.REGIONSERVER_METRICS_PERIOD, metricsPeriodSec * 1000); + when(regionServer.getConfiguration()).thenReturn(conf); + when(regionServer.getRpcServer()).thenReturn(rpcServer); + + 
MetricsRegionServerWrapperImpl wrapper = new MetricsRegionServerWrapperImpl(regionServer); + + // we need to control the edge because rate calculations expect a + // stable interval relative to the configured period + ManualEnvironmentEdge edge = new ManualEnvironmentEdge(); + EnvironmentEdgeManager.injectEdge(edge); + + try { + for (int i = 1; i <= 10; i++) { + edge.incValue(wrapper.getPeriod()); + retVal.incrementAndGet(); + wrapper.forceRecompute(); + + int numRegions = regions.size(); + int totalStores = numRegions * numStoresPerRegion; + + // there are N regions, and each has M stores. everything gets aggregated, so + // multiply expected values accordingly + int expectedForRegions = retVal.get() * numRegions; + int expectedForStores = retVal.get() * totalStores; + + assertEquals(totalStores, wrapper.getNumStores()); + assertEquals(expectedForStores, wrapper.getFlushedCellsCount()); + assertEquals(expectedForStores, wrapper.getCompactedCellsCount()); + assertEquals(expectedForStores, wrapper.getMajorCompactedCellsCount()); + assertEquals(expectedForStores, wrapper.getFlushedCellsSize()); + assertEquals(expectedForStores, wrapper.getCompactedCellsSize()); + assertEquals(expectedForStores, wrapper.getMajorCompactedCellsSize()); + assertEquals(expectedForRegions, wrapper.getCellsCountCompactedFromMob()); + assertEquals(expectedForRegions, wrapper.getCellsCountCompactedToMob()); + assertEquals(expectedForRegions, wrapper.getCellsSizeCompactedFromMob()); + assertEquals(expectedForRegions, wrapper.getCellsSizeCompactedToMob()); + assertEquals(expectedForRegions, wrapper.getMobFlushCount()); + assertEquals(expectedForRegions, wrapper.getMobFlushedCellsCount()); + assertEquals(expectedForRegions, wrapper.getMobFlushedCellsSize()); + assertEquals(expectedForRegions, wrapper.getMobScanCellsCount()); + assertEquals(expectedForRegions, wrapper.getMobScanCellsSize()); + assertEquals(expectedForRegions, wrapper.getCheckAndMutateChecksFailed()); + assertEquals(expectedForRegions, wrapper.getCheckAndMutateChecksPassed()); + assertEquals(expectedForStores, wrapper.getStoreFileIndexSize()); + assertEquals(expectedForStores, wrapper.getTotalStaticIndexSize()); + assertEquals(expectedForStores, wrapper.getTotalStaticBloomSize()); + assertEquals(expectedForStores, wrapper.getBloomFilterRequestsCount()); + assertEquals(expectedForStores, wrapper.getBloomFilterNegativeResultsCount()); + assertEquals(expectedForStores, wrapper.getBloomFilterEligibleRequestsCount()); + assertEquals(expectedForRegions, wrapper.getNumMutationsWithoutWAL()); + assertEquals(expectedForRegions, wrapper.getDataInMemoryWithoutWAL()); + assertEquals(expectedForRegions, wrapper.getAverageRegionSize()); + assertEquals(expectedForRegions, wrapper.getBlockedRequestsCount()); + assertEquals(expectedForStores, wrapper.getNumReferenceFiles()); + assertEquals(expectedForStores, wrapper.getMemStoreSize()); + assertEquals(expectedForStores, wrapper.getOnHeapMemStoreSize()); + assertEquals(expectedForStores, wrapper.getOffHeapMemStoreSize()); + assertEquals(expectedForStores, wrapper.getStoreFileSize()); + assertEquals(expectedForRegions, wrapper.getReadRequestsCount()); + assertEquals(expectedForRegions, wrapper.getCpRequestsCount()); + assertEquals(expectedForRegions, wrapper.getFilteredReadRequestsCount()); + assertEquals(expectedForRegions, wrapper.getWriteRequestsCount()); + assertEquals(expectedForRegions * 2, wrapper.getTotalRowActionRequestCount()); + + // If we have N regions, each with M stores. That's N*M stores in total. 
In creating those + // stores, we increment the number and age of storefiles for each one. So the first + // store has 1 file of 1 age, then 2 files of 2 age, etc. + // formula for 1+2+3..+n + assertEquals((totalStores * (totalStores + 1)) / 2, wrapper.getNumStoreFiles()); + assertEquals(totalStores, wrapper.getMaxStoreFiles()); + assertEquals(totalStores, wrapper.getMaxStoreFileAge()); + assertEquals(1, wrapper.getMinStoreFileAge()); + assertEquals(totalStores / 2, wrapper.getAvgStoreFileAge()); + + // there are four regions, two are primary and the other two secondary + // for each type, one region has 100% locality, the other has 0%. + // this just proves we correctly aggregate for each + assertEquals(50.0, wrapper.getPercentFileLocal(), 0.0001); + assertEquals(50.0, wrapper.getPercentFileLocalSecondaryRegions(), 0.0001); + + // readRequestCount and writeRequestCount are tracking the value of i, which increases by 1 + // each interval. There are N regions, so the delta each interval is N*i=N. So the rate is + // simply N / period. + assertEquals((double) numRegions / metricsPeriodSec, wrapper.getReadRequestsRatePerSecond(), + 0.0001); + assertEquals((double) numRegions / metricsPeriodSec, + wrapper.getWriteRequestsRatePerSecond(), 0.0001); + // total of above, so multiply by 2 + assertEquals((double) numRegions / metricsPeriodSec * 2, wrapper.getRequestsPerSecond(), + 0.0001); + // Similar logic to above, except there are M totalStores and each one is of + // size tracking i. So the rate is just M / period. + assertEquals((double) totalStores / metricsPeriodSec, wrapper.getStoreFileSizeGrowthRate(), + 0.0001); + } + } finally { + EnvironmentEdgeManager.reset(); + } + } + + private HRegion getMockedRegion(Answer defaultAnswer, String name, String localOnHost, + boolean isPrimary, AtomicInteger storeFileCount) { + RegionInfo regionInfo = mock(RegionInfo.class); + when(regionInfo.getEncodedName()).thenReturn(name); + if (!isPrimary) { + when(regionInfo.getReplicaId()).thenReturn(RegionInfo.DEFAULT_REPLICA_ID + 1); + } + HDFSBlocksDistribution distribution = new HDFSBlocksDistribution(); + distribution.addHostsAndBlockWeight(new String[] { localOnHost }, 100); + + HStore store = getMockedStore(HStore.class, defaultAnswer, storeFileCount); + HMobStore mobStore = getMockedStore(HMobStore.class, defaultAnswer, storeFileCount); + + HRegion region = mock(HRegion.class, defaultAnswer); + when(region.getRegionInfo()).thenReturn(regionInfo); + when(region.getHDFSBlocksDistribution()).thenReturn(distribution); + when(region.getStores()).thenReturn(Lists.newArrayList(store, mobStore)); + return region; + } + + private T getMockedStore(Class clazz, Answer defaultAnswer, + AtomicInteger storeFileCount) { + T store = mock(clazz, defaultAnswer); + int storeFileCountVal = storeFileCount.getAndIncrement(); + when(store.getStorefilesCount()).thenReturn(storeFileCountVal); + when(store.getAvgStoreFileAge()).thenReturn(OptionalDouble.of(storeFileCountVal)); + when(store.getMaxStoreFileAge()).thenReturn(OptionalLong.of(storeFileCountVal)); + when(store.getMinStoreFileAge()).thenReturn(OptionalLong.of(storeFileCountVal)); + MemStoreSize memStore = mock(MemStoreSize.class, defaultAnswer); + when(store.getMemStoreSize()).thenReturn(memStore); + return store; + } + +} From 82a2ce10f24a828b2c4960ba85b714a0203c8441 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 9 Dec 2023 21:55:11 +0800 Subject: [PATCH 164/514] HBASE-28248 Race between RegionRemoteProcedureBase and rollback operation could lead to ROLLEDBACK 
state being persisted to procedure store (#5567) Signed-off-by: GeorryHuang Signed-off-by: Yi Mei --- .../assignment/RegionRemoteProcedureBase.java | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index d27e0068b0ca..f6668d9c14b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionRemoteProcedureBaseState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionRemoteProcedureBaseStateData; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** @@ -183,7 +184,20 @@ protected abstract void updateTransitionWithoutPersistingToMeta(MasterProcedureE // A bit strange but the procedure store will throw RuntimeException if we can not persist the // state, so upper layer should take care of this... private void persistAndWake(MasterProcedureEnv env, RegionStateNode regionNode) { - env.getMasterServices().getMasterProcedureExecutor().getStore().update(this); + // The synchronization here is to guard with ProcedureExecutor.executeRollback, as here we will + // not hold the procedure execution lock, but we should not persist a procedure in ROLLEDBACK + // state to the procedure store. + // The ProcedureStore.update must be inside the lock, so here the check for procedure state and + // update could be atomic. In ProcedureExecutor.cleanupAfterRollbackOneStep, we will set the + // state to ROLLEDBACK, which will hold the same lock too as the Procedure.setState method is + // synchronized. This is the key to keep us safe. + synchronized (this) { + if (getState() == ProcedureState.ROLLEDBACK) { + LOG.warn("Procedure {} has already been rolled back, skip persistent", this); + return; + } + env.getMasterServices().getMasterProcedureExecutor().getStore().update(this); + } regionNode.getProcedureEvent().wake(env.getProcedureScheduler()); } From 25c639f9d6deb52367089e2631004f4d6862daea Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Mon, 11 Dec 2023 10:17:33 +0000 Subject: [PATCH 165/514] HBASE-28251 [SFT] Add description for specifying SFT impl during snapshot recovery (#5570) Signed-off-by: Duo Zhang Signed-off-by: Nihal Jain Signed-off-by: Peter Somogyi --- .../_chapters/bulk_data_generator_tool.adoc | 8 ++++---- .../_chapters/store_file_tracking.adoc | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc index 3ac6ca693121..b04fcdeb7264 100644 --- a/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc +++ b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc @@ -18,8 +18,8 @@ * limitations under the License. 
 */ //// - -== Bulk Data Generator Tool +[[BulkDataGeneratorTool]] += Bulk Data Generator Tool :doctype: book :numbered: :toc: left @@ -29,7 +29,7 @@ This is a random data generator tool for HBase tables leveraging HBase bulk load. It can create a pre-split HBase table and the generated data is *uniformly distributed* to all the regions of the table. -=== How to Use +== Usage [source] ---- hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TE hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 -Dmapreduce.map.memory.mb=8192 ---- -=== How it Works +== Overview ==== Table Schema Tool generates an HBase table with a single column family, i.e. *cf* and 9 columns i.e. diff --git a/src/main/asciidoc/_chapters/store_file_tracking.adoc b/src/main/asciidoc/_chapters/store_file_tracking.adoc index 74d802f386c5..b6c1f7e73399 100644 --- a/src/main/asciidoc/_chapters/store_file_tracking.adoc +++ b/src/main/asciidoc/_chapters/store_file_tracking.adoc @@ -143,3 +143,22 @@ example, that would be as follows: ---- alter 'my-table', CONFIGURATION => {'hbase.store.file-tracker.impl' => 'FILE'} ---- + +### Specifying trackers during snapshot recovery + +It's also possible to specify a given store file tracking implementation when recovering a snapshot +using the _CLONE_SFT_ option of the _clone_snapshot_ command. This is useful when recovering old +snapshots, taken prior to a change in the global configuration, or if the snapshot has been +imported from a different cluster that had a different store file tracking setting. +Because snapshots preserve table and column family descriptors, a simple restore would reload +the original configuration, requiring the additional steps described above to convert the +table/column family to the desired tracker implementation. +An example of how to use _clone_snapshot_ to specify the *FILE* tracker implementation +is shown below: + +---- +clone_snapshot 'snapshotName', 'namespace:tableName', {CLONE_SFT=>'FILE'} +---- + +NOTE: The option to specify the tracker during snapshot recovery is only available for the +_clone_snapshot_ command. The _restore_snapshot_ command does not support this parameter. From 78c5ac372550835133935a3022a0203c8441 Mon Sep 17 00:00:00 2001 From: Fantasy-Jay <13631435453@163.com> Date: Mon, 11 Dec 2023 23:22:13 +0800 Subject: [PATCH 166/514] HBASE-28190 Add slow sync log rolling test in TestAsyncLogRolling. 
(#5507) Signed-off-by: Duo Zhang --- .../hbase/regionserver/wal/AbstractFSWAL.java | 4 + .../hadoop/hbase/regionserver/wal/FSHLog.java | 8 - .../wal/AbstractTestLogRolling.java | 106 +++++++- .../regionserver/wal/TestAsyncLogRolling.java | 65 +++++ .../regionserver/wal/TestLogRolling.java | 234 ++++-------------- 5 files changed, 218 insertions(+), 199 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index acf3231d4e90..1a5b5384b01f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -2245,6 +2245,10 @@ private static void split(final Configuration conf, final Path p) throws IOExcep WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf)); } + W getWriter() { + return this.writer; + } + private static void usage() { System.err.println("Usage: AbstractFSWAL "); System.err.println("Arguments:"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index d0d5ce5f2e17..131f284557af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -603,14 +603,6 @@ DatanodeInfo[] getPipeline() { return new DatanodeInfo[0]; } - Writer getWriter() { - return this.writer; - } - - void setWriter(Writer writer) { - this.writer = writer; - } - @Override protected Writer createCombinedWriter(Writer localWriter, Writer remoteWriter) { // put remote writer first as usually it will cost more time to finish, so we write to it first diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 940dbebf614b..2a5aec458828 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -20,9 +20,13 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThan; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -31,6 +35,7 @@ import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Get; @@ -48,8 +53,10 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hdfs.MiniDFSCluster; import 
org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -59,6 +66,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Test log deletion as logs are rolled. */ @@ -74,6 +83,10 @@ public abstract class AbstractTestLogRolling { protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @Rule public final TestName name = new TestName(); + protected static int syncLatencyMillis; + private static int rowNum = 1; + private static final AtomicBoolean slowSyncHookCalled = new AtomicBoolean(); + protected static ScheduledExecutorService EXECUTOR; public AbstractTestLogRolling() { this.server = null; @@ -118,6 +131,17 @@ public static void setUpBeforeClass() throws Exception { // disable low replication check for log roller to get a more stable result // TestWALOpenAfterDNRollingStart will test this option. conf.setLong("hbase.regionserver.hlog.check.lowreplication.interval", 24L * 60 * 60 * 1000); + + // For slow sync threshold test: roll after 5 slow syncs in 10 seconds + conf.setInt(FSHLog.SLOW_SYNC_ROLL_THRESHOLD, 5); + conf.setInt(FSHLog.SLOW_SYNC_ROLL_INTERVAL_MS, 10 * 1000); + // For slow sync threshold test: roll once after a sync above this threshold + conf.setInt(FSHLog.ROLL_ON_SYNC_TIME_MS, 5000); + + // Slow sync executor. + EXECUTOR = Executors + .newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("Slow-sync-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); } @Before @@ -139,6 +163,11 @@ public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + @AfterClass + public static void tearDownAfterClass() { + EXECUTOR.shutdownNow(); + } + private void startAndWriteData() throws IOException, InterruptedException { this.server = cluster.getRegionServerThreads().get(0).getRegionServer(); @@ -158,6 +187,74 @@ private void startAndWriteData() throws IOException, InterruptedException { } } + private static void setSyncLatencyMillis(int latency) { + syncLatencyMillis = latency; + } + + protected final AbstractFSWAL getWALAndRegisterSlowSyncHook(RegionInfo region) + throws IOException { + // Get a reference to the wal. 
+ final AbstractFSWAL log = (AbstractFSWAL) server.getWAL(region); + + // Register a WALActionsListener to observe if a SLOW_SYNC roll is requested + log.registerWALActionsListener(new WALActionsListener() { + @Override + public void logRollRequested(RollRequestReason reason) { + switch (reason) { + case SLOW_SYNC: + slowSyncHookCalled.lazySet(true); + break; + default: + break; + } + } + }); + return log; + } + + protected final void checkSlowSync(AbstractFSWAL log, Table table, int slowSyncLatency, + int writeCount, boolean slowSync) throws Exception { + if (slowSyncLatency > 0) { + setSyncLatencyMillis(slowSyncLatency); + setSlowLogWriter(log.conf); + } else { + setDefaultLogWriter(log.conf); + } + + // Set up for test + log.rollWriter(true); + slowSyncHookCalled.set(false); + + final WALProvider.WriterBase oldWriter = log.getWriter(); + + // Write some data + for (int i = 0; i < writeCount; i++) { + writeData(table, rowNum++); + } + + if (slowSync) { + TEST_UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate() { + @Override + public boolean evaluate() throws Exception { + return log.getWriter() != oldWriter; + } + + @Override + public String explainFailure() throws Exception { + return "Waited too long for our test writer to get rolled out"; + } + }); + + assertTrue("Should have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); + } else { + assertFalse("Should not have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); + } + } + + protected abstract void setSlowLogWriter(Configuration conf); + + protected abstract void setDefaultLogWriter(Configuration conf); + /** * Tests that log rolling doesn't hang when no data is written. */ @@ -239,12 +336,10 @@ void validateData(Table table, int rownum) throws IOException { */ @Test public void testCompactionRecordDoesntBlockRolling() throws Exception { - Table table = null; // When the hbase:meta table can be opened, the region servers are running - Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - try { - table = createTestTable(getName()); + try (Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + Table table = createTestTable(getName())) { server = TEST_UTIL.getRSForFirstRegionInTable(table.getName()); HRegion region = server.getRegions(table.getName()).get(0); @@ -286,9 +381,6 @@ public void testCompactionRecordDoesntBlockRolling() throws Exception { log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added. 
assertEquals("Should have 1 WALs at the end", 1, AbstractFSWALProvider.getNumRolledLogFiles(log)); - } finally { - if (t != null) t.close(); - if (table != null) table.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java index 9dc27a693a7f..804e93eb8f56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java @@ -20,10 +20,17 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; @@ -36,6 +43,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.io.netty.channel.Channel; +import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + @Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestAsyncLogRolling extends AbstractTestLogRolling { @@ -51,6 +61,61 @@ public static void setUpBeforeClass() throws Exception { AbstractTestLogRolling.setUpBeforeClass(); } + public static class SlowSyncLogWriter extends AsyncProtobufLogWriter { + + public SlowSyncLogWriter(EventLoopGroup eventLoopGroup, Class channelClass) { + super(eventLoopGroup, channelClass); + } + + @Override + public CompletableFuture sync(boolean forceSync) { + CompletableFuture future = new CompletableFuture<>(); + super.sync(forceSync).whenCompleteAsync((lengthAfterFlush, error) -> { + EXECUTOR.schedule(() -> { + if (error != null) { + future.completeExceptionally(error); + } else { + future.complete(lengthAfterFlush); + } + }, syncLatencyMillis, TimeUnit.MILLISECONDS); + }); + return future; + } + } + + @Override + protected void setSlowLogWriter(Configuration conf) { + conf.set(AsyncFSWALProvider.WRITER_IMPL, SlowSyncLogWriter.class.getName()); + } + + @Override + protected void setDefaultLogWriter(Configuration conf) { + conf.set(AsyncFSWALProvider.WRITER_IMPL, AsyncProtobufLogWriter.class.getName()); + } + + @Test + public void testSlowSyncLogRolling() throws Exception { + // Create the test table + TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + admin.createTable(desc); + try (Table table = TEST_UTIL.getConnection().getTable(desc.getTableName())) { + server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); + RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo(); + final AbstractFSWAL log = getWALAndRegisterSlowSyncHook(region); + + // Set default log writer, no additional latency to any 
sync on the hlog. + checkSlowSync(log, table, -1, 10, false); + + // Adds 5000 ms of latency to any sync on the hlog. This will trip the other threshold. + // Write some data. Should only take one sync. + checkSlowSync(log, table, 5000, 1, true); + + // Set default log writer, no additional latency to any sync on the hlog. + checkSlowSync(log, table, -1, 10, false); + } + } + @Test public void testLogRollOnDatanodeDeath() throws IOException, InterruptedException { dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 3, true, null, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index f07a02cb25d1..9caa47e8614b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; @@ -56,10 +55,9 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; +import org.apache.hadoop.hbase.wal.FSHLogProvider; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hbase.wal.WALStreamReader; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -98,192 +96,30 @@ public static void setUpBeforeClass() throws Exception { conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3); conf.set(WALFactory.WAL_PROVIDER, "filesystem"); AbstractTestLogRolling.setUpBeforeClass(); - - // For slow sync threshold test: roll after 5 slow syncs in 10 seconds - TEST_UTIL.getConfiguration().setInt(FSHLog.SLOW_SYNC_ROLL_THRESHOLD, 5); - TEST_UTIL.getConfiguration().setInt(FSHLog.SLOW_SYNC_ROLL_INTERVAL_MS, 10 * 1000); - // For slow sync threshold test: roll once after a sync above this threshold - TEST_UTIL.getConfiguration().setInt(FSHLog.ROLL_ON_SYNC_TIME_MS, 5000); } - @Test - public void testSlowSyncLogRolling() throws Exception { - // Create the test table - TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); - admin.createTable(desc); - Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); - int row = 1; - try { - // Get a reference to the FSHLog - server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); - RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo(); - final FSHLog log = (FSHLog) server.getWAL(region); - - // Register a WALActionsListener to observe if a SLOW_SYNC roll is requested - - final AtomicBoolean slowSyncHookCalled = new AtomicBoolean(); - log.registerWALActionsListener(new WALActionsListener() { - @Override - public void logRollRequested(WALActionsListener.RollRequestReason reason) { - switch (reason) { - case SLOW_SYNC: - slowSyncHookCalled.lazySet(true); - 
break; - default: - break; - } - } - }); - - // Write some data - - for (int i = 0; i < 10; i++) { - writeData(table, row++); - } - - assertFalse("Should not have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); - - // Set up for test - slowSyncHookCalled.set(false); - - // Wrap the current writer with the anonymous class below that adds 200 ms of - // latency to any sync on the hlog. This should be more than sufficient to trigger - // slow sync warnings. - final Writer oldWriter1 = log.getWriter(); - final Writer newWriter1 = new Writer() { - @Override - public void close() throws IOException { - oldWriter1.close(); - } - - @Override - public void sync(boolean forceSync) throws IOException { - try { - Thread.sleep(200); - } catch (InterruptedException e) { - InterruptedIOException ex = new InterruptedIOException(); - ex.initCause(e); - throw ex; - } - oldWriter1.sync(forceSync); - } - - @Override - public void append(Entry entry) throws IOException { - oldWriter1.append(entry); - } - - @Override - public long getLength() { - return oldWriter1.getLength(); - } - - @Override - public long getSyncedLength() { - return oldWriter1.getSyncedLength(); - } - }; - log.setWriter(newWriter1); - - // Write some data. - // We need to write at least 5 times, but double it. We should only request - // a SLOW_SYNC roll once in the current interval. - for (int i = 0; i < 10; i++) { - writeData(table, row++); - } - - // Wait for our wait injecting writer to get rolled out, as needed. - - TEST_UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate() { - @Override - public boolean evaluate() throws Exception { - return log.getWriter() != newWriter1; - } - - @Override - public String explainFailure() throws Exception { - return "Waited too long for our test writer to get rolled out"; - } - }); - - assertTrue("Should have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); - - // Set up for test - slowSyncHookCalled.set(false); - - // Wrap the current writer with the anonymous class below that adds 5000 ms of - // latency to any sync on the hlog. - // This will trip the other threshold. - final Writer oldWriter2 = (Writer) log.getWriter(); - final Writer newWriter2 = new Writer() { - @Override - public void close() throws IOException { - oldWriter2.close(); - } - - @Override - public void sync(boolean forceSync) throws IOException { - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - InterruptedIOException ex = new InterruptedIOException(); - ex.initCause(e); - throw ex; - } - oldWriter2.sync(forceSync); - } - - @Override - public void append(Entry entry) throws IOException { - oldWriter2.append(entry); - } - - @Override - public long getLength() { - return oldWriter2.getLength(); - } - - @Override - public long getSyncedLength() { - return oldWriter2.getSyncedLength(); - } - }; - log.setWriter(newWriter2); - - // Write some data. Should only take one sync. - - writeData(table, row++); - - // Wait for our wait injecting writer to get rolled out, as needed. 
- - TEST_UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate() { - @Override - public boolean evaluate() throws Exception { - return log.getWriter() != newWriter2; - } - - @Override - public String explainFailure() throws Exception { - return "Waited too long for our test writer to get rolled out"; - } - }); - - assertTrue("Should have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); - - // Set up for test - slowSyncHookCalled.set(false); - - // Write some data - for (int i = 0; i < 10; i++) { - writeData(table, row++); + public static class SlowSyncLogWriter extends ProtobufLogWriter { + @Override + public void sync(boolean forceSync) throws IOException { + try { + Thread.sleep(syncLatencyMillis); + } catch (InterruptedException e) { + InterruptedIOException ex = new InterruptedIOException(); + ex.initCause(e); + throw ex; } + super.sync(forceSync); + } + } - assertFalse("Should not have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); + @Override + protected void setSlowLogWriter(Configuration conf) { + conf.set(FSHLogProvider.WRITER_IMPL, SlowSyncLogWriter.class.getName()); + } - } finally { - table.close(); - } + @Override + protected void setDefaultLogWriter(Configuration conf) { + conf.set(FSHLogProvider.WRITER_IMPL, ProtobufLogWriter.class.getName()); } void batchWriteAndWait(Table table, final FSHLog log, int start, boolean expect, int timeout) @@ -313,6 +149,36 @@ void batchWriteAndWait(Table table, final FSHLog log, int start, boolean expect, } } + @Test + public void testSlowSyncLogRolling() throws Exception { + // Create the test table + TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + admin.createTable(desc); + try (Table table = TEST_UTIL.getConnection().getTable(desc.getTableName())) { + server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); + RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo(); + final AbstractFSWAL log = getWALAndRegisterSlowSyncHook(region); + + // Set default log writer, no additional latency to any sync on the hlog. + checkSlowSync(log, table, -1, 10, false); + + // Adds 200 ms of latency to any sync on the hlog. This should be more than sufficient to + // trigger slow sync warnings. + // Write some data. + // We need to write at least 5 times, but double it. We should only request + // a SLOW_SYNC roll once in the current interval. + checkSlowSync(log, table, 200, 10, true); + + // Adds 5000 ms of latency to any sync on the hlog. This will trip the other threshold. + // Write some data. Should only take one sync. + checkSlowSync(log, table, 5000, 1, true); + + // Set default log writer, no additional latency to any sync on the hlog. 
+ checkSlowSync(log, table, -1, 10, false); + } + } + /** * Tests that logs are rolled upon detecting datanode death Requires an HDFS jar with HDFS-826 & * syncFs() support (HDFS-200) From 5503962350d6a4fbb7d7e21c4f73a070707d847b Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 12 Dec 2023 10:41:40 +0100 Subject: [PATCH 167/514] HBASE-28252 Add sun.net.dns and sun.net.util to the JDK11+ module exports in the hbase script (#5571) Signed-off-by: Duo Zhang Signed-off-by: Balazs Meszaros --- bin/hbase | 5 +++-- bin/hbase-config.sh | 8 ++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/bin/hbase b/bin/hbase index 60cfb8afef9a..30b4e94a89ae 100755 --- a/bin/hbase +++ b/bin/hbase @@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() { } add_jdk11_jvm_flags() { - HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" } add_opentelemetry_agent() { @@ -786,7 +786,7 @@ fi # Add lib/jdk11 jars to the classpath if [ "${DEBUG}" = "true" ]; then - echo "Deciding on addition of lib/jdk11 jars to the classpath" + echo "Deciding on addition of lib/jdk11 jars to the classpath and setting JVM module flags" fi addJDK11Jars=false @@ -879,6 +879,7 @@ export CLASSPATH if [ "${DEBUG}" = "true" ]; then echo "classpath=${CLASSPATH}" >&2 HBASE_OPTS="${HBASE_OPTS} -Xdiag" + echo "HBASE_OPTS=${HBASE_OPTS}" fi # resolve the command arguments diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 104e9a0b67c3..0e8b3feed213 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -178,8 +178,12 @@ EOF fi function read_java_version() { - properties="$("${JAVA_HOME}/bin/java" -XshowSettings:properties -version 2>&1)" - echo "${properties}" | "${GREP}" java.runtime.version | head -1 | "${SED}" -e 's/.* = \([^ ]*\)/\1/' + # Avoid calling java repeatedly + if [ -z "$read_java_version_cached" ]; then + properties="$("${JAVA_HOME}/bin/java" -XshowSettings:properties -version 2>&1)" + read_java_version_cached="$(echo "${properties}" | "${GREP}" java.runtime.version | head -1 | "${SED}" -e 's/.* = \([^ ]*\)/\1/')" + fi + echo "$read_java_version_cached" } # Inspect the system properties exposed by this JVM to identify the major From f406c6bbf03ad3d5ca6bbd14944b8846c9ceb914 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 12 Dec 2023 15:13:40 +0100 Subject: [PATCH 168/514] HBASE-28247 Add java.base/sun.net.dns and java.base/sun.net.util export to jdk11 JVM test flags (#5569) Signed-off-by: Nihal Jain Signed-off-by: Balazs Meszaros --- pom.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/pom.xml b/pom.xml index 2b1da88e056f..85bcf65108e5 100644 --- a/pom.xml +++ b/pom.xml @@ -981,7 +981,9 @@ --add-opens java.base/java.util=ALL-UNNAMED --add-opens java.base/java.util.concurrent=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED - --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED + --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED + --add-exports java.base/sun.net.dns=ALL-UNNAMED + --add-exports java.base/sun.net.util=ALL-UNNAMED --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED ${hbase-surefire.argLine} @{jacocoArgLine} From 3d117125892ee36e8a66171fba3a223c09bc0b9a Mon Sep 17 00:00:00 2001 From: hiping-tech <58875741+hiping-tech@users.noreply.github.com> Date: Tue, 12 Dec 2023 23:13:01 +0800 Subject: [PATCH 169/514] HBASE-28241 The snapshot operation encountered an NPE and failed. (#5560) Fixed the check for an ongoing Snapshot before proceeding with the merge/split region operation. Co-authored-by: lvhaiping.lhp Signed-off-by: Duo Zhang Signed-off-by: Hui Ruan --- .../MergeTableRegionsProcedure.java | 2 +- .../assignment/SplitTableRegionProcedure.java | 3 +- .../TestMergeTableRegionsProcedure.java | 44 +++++++++++++++ .../TestSplitTableRegionProcedure.java | 53 +++++++++++++++++++ .../procedure/TestSnapshotProcedure.java | 24 +++++++++ 5 files changed, 124 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index c0b47b0bc246..7d4ec71d35b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -442,7 +442,7 @@ protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) { private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException { // Fail if we are taking snapshot for the given table TableName tn = regionsToMerge[0].getTable(); - if (env.getMasterServices().getSnapshotManager().isTakingSnapshot(tn)) { + if (env.getMasterServices().getSnapshotManager().isTableTakingAnySnapshot(tn)) { throw new MergeRegionException("Skip merging regions " + RegionInfo.getShortNameToLog(regionsToMerge) + ", because we are snapshotting " + tn); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index a0118cbd7b05..2e2182b25d29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -505,7 +505,8 @@ private byte[] getSplitRow() { public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException { // Fail if we are taking snapshot for the given table if ( - env.getMasterServices().getSnapshotManager().isTakingSnapshot(getParentRegion().getTable()) + env.getMasterServices().getSnapshotManager() + .isTableTakingAnySnapshot(getParentRegion().getTable()) ) { setFailure(new IOException("Skip splitting region " + getParentRegion().getShortNameToLog() + ", because we are taking snapshot for the table " + getParentRegion().getTable())); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java index abc6fc45ad30..c0c4e355f2bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.assignment; +import static org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.assertProcFailed; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -33,16 +34,20 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility; +import org.apache.hadoop.hbase.master.procedure.TestSnapshotProcedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; @@ -59,6 +64,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; + @Category({ MasterTests.class, LargeTests.class }) public class TestMergeTableRegionsProcedure { @@ -347,6 +355,42 @@ public void testMergeWithoutPONR() throws Exception { assertRegionCount(tableName, initialRegionCount - 1); } + @Test + public void testMergingRegionWhileTakingSnapshot() throws Exception { + final TableName tableName = TableName.valueOf("testMergingRegionWhileTakingSnapshot"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + List tableRegions = createTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + + SnapshotDescription snapshot = + new SnapshotDescription("SnapshotProcedureTest", tableName, SnapshotType.FLUSH); + SnapshotProtos.SnapshotDescription snapshotProto = + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + snapshotProto = SnapshotDescriptionUtils.validate(snapshotProto, + UTIL.getHBaseCluster().getMaster().getConfiguration()); + long snapshotProcId = procExec.submitProcedure( + new TestSnapshotProcedure.DelaySnapshotProcedure(procExec.getEnvironment(), snapshotProto)); + UTIL.getHBaseCluster().getMaster().getSnapshotManager().registerSnapshotProcedure(snapshotProto, + snapshotProcId); + + RegionInfo[] regionsToMerge = new RegionInfo[2]; + regionsToMerge[0] = tableRegions.get(0); + regionsToMerge[1] = tableRegions.get(1); + + long mergeProcId = procExec.submitProcedure( + new 
MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true)); + + ProcedureTestingUtility + .waitProcedure(UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(), mergeProcId); + ProcedureTestingUtility.waitProcedure( + UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(), snapshotProcId); + + assertProcFailed(procExec, mergeProcId); + assertEquals(initialRegionCount, UTIL.getAdmin().getRegions(tableName).size()); + } + private List createTable(final TableName tableName) throws Exception { TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java index 6e25dbab48ce..6ec36e75bea2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java @@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -50,10 +52,12 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility; +import org.apache.hadoop.hbase.master.procedure.TestSnapshotProcedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -69,6 +73,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; + @Category({ MasterTests.class, MediumTests.class }) public class TestSplitTableRegionProcedure { @@ -550,6 +557,52 @@ public void testSplitWithoutPONR() throws Exception { verify(tableName, splitRowNum); } + @Test + public void testSplitRegionWhileTakingSnapshot() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + + // task snapshot + SnapshotDescription snapshot = + new 
SnapshotDescription("SnapshotProcedureTest", tableName, SnapshotType.FLUSH); + SnapshotProtos.SnapshotDescription snapshotProto = + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + snapshotProto = SnapshotDescriptionUtils.validate(snapshotProto, + UTIL.getHBaseCluster().getMaster().getConfiguration()); + long snapshotProcId = procExec.submitProcedure( + new TestSnapshotProcedure.DelaySnapshotProcedure(procExec.getEnvironment(), snapshotProto)); + UTIL.getHBaseCluster().getMaster().getSnapshotManager().registerSnapshotProcedure(snapshotProto, + snapshotProcId); + + // collect AM metrics before test + collectAssignmentManagerMetrics(); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.waitProcedure(procExec, snapshotProcId); + + ProcedureTestingUtility.assertProcFailed(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, snapshotProcId); + + assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 1); + assertTrue(UTIL.countRows(tableName) == 0); + + assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount()); + assertEquals(splitFailedCount + 1, splitProcMetrics.getFailedCounter().getCount()); + } + private void deleteData(final TableName tableName, final int startDeleteRowNum) throws IOException, InterruptedException { Table t = UTIL.getConnection().getTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java index 04442eb771d8..84a3e84763b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.master.procedure; +import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotState.SNAPSHOT_SNAPSHOT_ONLINE_REGIONS; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Optional; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -51,6 +53,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotState; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -70,6 +73,27 @@ public class TestSnapshotProcedure { protected SnapshotDescription snapshot; protected SnapshotProtos.SnapshotDescription snapshotProto; + public static final class DelaySnapshotProcedure extends SnapshotProcedure { + public DelaySnapshotProcedure() { + } + + public DelaySnapshotProcedure(final MasterProcedureEnv env, + final SnapshotProtos.SnapshotDescription snapshot) { + super(env, snapshot); + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, + MasterProcedureProtos.SnapshotState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + Flow flow = super.executeFromState(env, state); + 
if (state == SNAPSHOT_SNAPSHOT_ONLINE_REGIONS) { + TimeUnit.SECONDS.sleep(20); + } + return flow; + } + } + @Before public void setup() throws Exception { TEST_UTIL = new HBaseTestingUtil(); From 29bfc610d0433f720a34bc47aadca1433bbb1882 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 13 Dec 2023 14:52:12 +0800 Subject: [PATCH 170/514] HBASE-28244 ProcedureTestingUtility.restart is broken sometimes after HBASE-28199 (#5563) Signed-off-by: Duo Zhang --- .../hbase/procedure2/ProcedureExecutor.java | 46 ++++++++++++++----- .../hbase/procedure2/ProcedureFutureUtil.java | 13 +++++- 2 files changed, 46 insertions(+), 13 deletions(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 5aa11811122b..e01a27d74675 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -35,6 +35,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -605,15 +606,23 @@ public void init(int numThreads, boolean abortOnCorruption) throws IOException { this.threadGroup = new ThreadGroup("PEWorkerGroup"); this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); this.workerMonitorExecutor = new TimeoutExecutorThread<>(this, threadGroup, "WorkerMonitor"); + ThreadFactory backingThreadFactory = new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + return new Thread(threadGroup, r); + } + }; int size = Math.max(2, Runtime.getRuntime().availableProcessors()); - ThreadPoolExecutor executor = new ThreadPoolExecutor(size, size, 1, TimeUnit.MINUTES, - new LinkedBlockingQueue(), new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat(getClass().getSimpleName() + "-Async-Task-Executor-%d").build()); + ThreadPoolExecutor executor = + new ThreadPoolExecutor(size, size, 1, TimeUnit.MINUTES, new LinkedBlockingQueue(), + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat(getClass().getSimpleName() + "-Async-Task-Executor-%d") + .setThreadFactory(backingThreadFactory).build()); executor.allowCoreThreadTimeOut(true); this.asyncTaskExecutor = executor; - forceUpdateExecutor = Executors.newSingleThreadExecutor( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build()); + forceUpdateExecutor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("Force-Update-PEWorker-%d").setThreadFactory(backingThreadFactory).build()); store.registerListener(new ProcedureStoreListener() { @Override @@ -684,10 +693,10 @@ public void startWorkers() throws IOException { } public void stop() { - if (!running.getAndSet(false)) { - return; - } - + // it is possible that we fail in init, while loading procedures, so we will not set running to + // true but we should have already started the ProcedureScheduler, and also the two + // ExecutorServices, so here we do not check running state, just stop them + running.set(false); LOG.info("Stopping"); scheduler.stop(); timeoutExecutor.sendStopSignal(); @@ -708,14 +717,29 @@ public void join() { for (WorkerThread worker : 
workerThreads) { worker.awaitTermination(); } + try { + if (!forceUpdateExecutor.awaitTermination(5, TimeUnit.SECONDS)) { + LOG.warn("There are still pending tasks in forceUpdateExecutor"); + } + } catch (InterruptedException e) { + LOG.warn("interrupted while waiting for forceUpdateExecutor termination", e); + Thread.currentThread().interrupt(); + } + try { + if (!asyncTaskExecutor.awaitTermination(5, TimeUnit.SECONDS)) { + LOG.warn("There are still pending tasks in asyncTaskExecutor"); + } + } catch (InterruptedException e) { + LOG.warn("interrupted while waiting for asyncTaskExecutor termination", e); + Thread.currentThread().interrupt(); + } // Destroy the Thread Group for the executors // TODO: Fix. #join is not place to destroy resources. try { threadGroup.destroy(); } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, - e.getMessage()); + LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, e); // This dumps list of threads on STDOUT. this.threadGroup.list(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java index 8ca4cba245da..997063c3097d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFutureUtil.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import java.util.function.Consumer; import java.util.function.Supplier; import org.apache.commons.lang3.mutable.MutableBoolean; @@ -61,6 +62,14 @@ public static void suspendIfNecessary(Procedure proc, throws IOException, ProcedureSuspendedException { MutableBoolean completed = new MutableBoolean(false); Thread currentThread = Thread.currentThread(); + // This is for testing. In ProcedureTestingUtility, we will restart a ProcedureExecutor and + // reuse it, for performance, so we need to make sure that all the procedure have been stopped. + // But here, the callback of this future is not executed in a PEWorker, so in ProcedureExecutor + // we have no way to stop it. So here, we will get the asyncTaskExecutor first, in the PEWorker + // thread, where the ProcedureExecutor should have not been stopped yet, then when calling the + // callback, if the ProcedureExecutor have already been stopped and restarted, the + // asyncTaskExecutor will also be shutdown so we can not add anything back to the scheduler. + ExecutorService asyncTaskExecutor = env.getAsyncTaskExecutor(); FutureUtils.addListener(future, (r, e) -> { if (Thread.currentThread() == currentThread) { LOG.debug("The future has completed while adding callback, give up suspending procedure {}", @@ -77,7 +86,7 @@ public static void suspendIfNecessary(Procedure proc, // And what makes things worse is that, we persist procedure state to master local region, // where the AsyncFSWAL implementation will use the same netty's event loop for dealing with // I/O, which could even cause dead lock. 
- env.getAsyncTaskExecutor().execute(() -> { + asyncTaskExecutor.execute(() -> { // should acquire procedure execution lock to make sure that the procedure executor has // finished putting this procedure to the WAITING_TIMEOUT state, otherwise there could be // race and cause unexpected result @@ -89,7 +98,7 @@ public static void suspendIfNecessary(Procedure proc, } catch (IOException ioe) { LOG.error("Error while acquiring execution lock for procedure {}" + " when trying to wake it up, aborting...", proc, ioe); - env.getMasterServices().abort("Can not acquire procedure execution lock", e); + env.getMasterServices().abort("Can not acquire procedure execution lock", ioe); return; } try { From 9eb6cc4f66e02a9b9a59d345b199fd7638843415 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 14 Dec 2023 07:46:36 -0500 Subject: [PATCH 171/514] HBASE-28029 Netty SSL throughput improvement (#5580) Signed-off-by: Duo Zhang Reviewed-by: Nihal Jain --- .../org/apache/hadoop/hbase/io/crypto/tls/X509Util.java | 9 +++++++++ .../java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java index 41acfbbf48f4..fff32866fb9a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java @@ -105,6 +105,15 @@ public final class X509Util { public static final String HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT = "hbase.server.netty.tls.supportplaintext"; + /** + * Set the SSL wrapSize for netty. This is only a maximum wrap size. Buffers smaller than this + * will not be consolidated, but buffers larger than this will be split into multiple wrap + * buffers. The netty default of 16k is not great for hbase which tends to return larger payloads + * than that, meaning most responses end up getting chunked up. This leads to more memory + * contention in netty's PoolArena. 
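Editorial aside, not part of the HBASE-28029 diff itself: the new wrap-size ceiling is an ordinary Configuration property, so a deployment wanting something other than the 1 MiB default would set hbase.server.netty.tls.wrapSize in hbase-site.xml, or on a Configuration object in embedded/test code. A minimal sketch of the latter, assuming only the constants declared in this patch:

// Illustrative only; mirrors how NettyRpcServer resolves the new setting in this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.crypto.tls.X509Util;

public class TlsWrapSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Same effect as setting hbase.server.netty.tls.wrapSize in hbase-site.xml.
    conf.setInt(X509Util.HBASE_SERVER_NETTY_TLS_WRAP_SIZE, 512 * 1024);
    // Falls back to the patch's default of 1024 * 1024 bytes when unset.
    int wrapSize = conf.getInt(X509Util.HBASE_SERVER_NETTY_TLS_WRAP_SIZE,
      X509Util.DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE);
    System.out.println("TLS wrap size: " + wrapSize + " bytes");
  }
}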
See https://github.com/netty/netty/pull/13551 + */ + public static final String HBASE_SERVER_NETTY_TLS_WRAP_SIZE = "hbase.server.netty.tls.wrapSize"; + public static final int DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE = 1024 * 1024; // // Client-side specific configs // diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index 722ee1d28c91..ceff84a90e11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hbase.ipc; +import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE; import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_ENABLED; import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT; +import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_WRAP_SIZE; import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.TLS_CONFIG_REVERSE_DNS_LOOKUP_ENABLED; import java.io.IOException; @@ -408,6 +410,9 @@ private void initSSL(ChannelPipeline p, boolean supportPlaintext) sslHandler = nettySslContext.newHandler(p.channel().alloc()); } + sslHandler.setWrapDataSize( + conf.getInt(HBASE_SERVER_NETTY_TLS_WRAP_SIZE, DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE)); + p.addLast("ssl", sslHandler); LOG.debug("SSL handler added for channel: {}", p.channel()); } From 4ffdcea08f67cb1ae76f17b9d6a2653a45f2781c Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Thu, 14 Dec 2023 21:52:01 +0530 Subject: [PATCH 172/514] HBASE-28245 Sync internal protobuf version for hbase to be same as hbase-thirdparty (#5564) Signed-off-by: Duo Zhang --- hbase-examples/pom.xml | 2 +- hbase-protocol-shaded/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 1a5ca5bd09aa..5a7d8f957da0 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -33,7 +33,7 @@ - 3.21.12 + 3.24.3 diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 1926e049cb9b..b2d9f79bbd08 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -34,7 +34,7 @@ - 3.21.12 + 3.24.3 false @@ -3159,6 +3162,7 @@ testapidocs User API The HBase Application Programmer's API + Apache HBase™ ${project.version} API 
org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress* false From 4e0af3b7ae7cd751f5c544de136783d606c9f825 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 3 Jan 2024 18:01:07 +0800 Subject: [PATCH 191/514] HBASE-28290 Addendum also change title for test javadoc (#5601) Signed-off-by: Nihal Jain Signed-off-by: Peter Somogyi --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index 20efab111c09..2fbd2769029a 100644 --- a/pom.xml +++ b/pom.xml @@ -3044,7 +3044,7 @@ testdevapidocs Test Developer API The full HBase API test code, including private and unstable APIs - Apache HBase™ ${project.version} API + Apache HBase™ ${project.version} API **/generated/* **/protobuf/* @@ -3160,9 +3160,9 @@ true testapidocs - User API + Test User API The HBase Application Programmer's API - Apache HBase™ ${project.version} API + Apache HBase™ ${project.version} API org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress* false From 4a545581ca92a4a0e54a777f0e846e5c7af75b9f Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Wed, 3 Jan 2024 18:56:44 +0530 Subject: [PATCH 192/514] HBASE-28249 Bump jruby to 9.3.13.0 and related joni and jcodings to 2.2.1 and 1.0.58 
respectively (#5568) - Also fix "NoMethodError: undefined method runtime for JRuby:Module" caused due to bump Signed-off-by: Rajeshbabu Chintaguntla --- hbase-shell/src/main/ruby/jar-bootstrap.rb | 8 ++++++++ pom.xml | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hbase-shell/src/main/ruby/jar-bootstrap.rb b/hbase-shell/src/main/ruby/jar-bootstrap.rb index ed8f9b3a3a29..63cb0a755449 100644 --- a/hbase-shell/src/main/ruby/jar-bootstrap.rb +++ b/hbase-shell/src/main/ruby/jar-bootstrap.rb @@ -37,6 +37,14 @@ # hbase hacking. include Java +# Required to access JRuby-specific internal features, such as `JRuby.runtime` +# Loading 'java' was automatically loading 'jruby' until JRuby 9.2. +# But, it has changed since JRuby 9.3. JRuby 9.3+ needs loading 'jruby' explicitly. +# +# See also: https://github.com/jruby/jruby/issues/7221#issuecomment-1133646241 +# +require 'jruby' + # Some goodies for hirb. Should these be left up to the user's discretion? if $stdin.tty? require 'irb/completion' diff --git a/pom.xml b/pom.xml index 2fbd2769029a..bbecc3a72d86 100644 --- a/pom.xml +++ b/pom.xml @@ -846,7 +846,7 @@ 2.1.1 2.3.2 3.0.1-b08 - 9.3.9.0 + 9.3.13.0 4.13.2 1.3 1.15.0 @@ -863,8 +863,8 @@ 2.4.1 1.5.4 - 2.1.43 - 1.0.57 + 2.2.1 + 1.0.58 2.12.2 1.76 1.5.1 From 119885415c2f1770d95fdf832d249cbf7a0c80f4 Mon Sep 17 00:00:00 2001 From: mrzhao Date: Fri, 5 Jan 2024 17:15:17 +0800 Subject: [PATCH 193/514] HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581) Signed-off-by: Wellington Chevreuil Signed-off-by: Nihal Jain --- bin/hbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/hbase b/bin/hbase index 30b4e94a89ae..e329c5070899 100755 --- a/bin/hbase +++ b/bin/hbase @@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() { } add_jdk11_jvm_flags() { - HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" } add_opentelemetry_agent() { From 849eee46c937bc9bed801d90e2402f4b1a901de0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 6 Jan 2024 16:47:17 +0800 Subject: [PATCH 194/514] HBASE-28290 Addendum use 'Test API' for test javadoc (#5602) Signed-off-by: Nihal Jain --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index bbecc3a72d86..ae6a75fbae92 100644 --- a/pom.xml +++ b/pom.xml @@ -3044,7 +3044,7 @@ testdevapidocs Test Developer API The full HBase API test code, including 
private and unstable APIs - Apache HBase™ ${project.version} API + Apache HBase™ ${project.version} Test API **/generated/* **/protobuf/* @@ -3161,8 +3161,8 @@ true testapidocs Test User API - The HBase Application Programmer's API - Apache HBase™ ${project.version} API + The HBase Application Programmer's API test code + Apache HBase™ ${project.version} Test API org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress* false From 4aeabdcc7156f21aea4155972b0d0d8c5dff4966 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 6 Jan 2024 16:47:56 +0800 Subject: [PATCH 195/514] HBASE-28277 Move minimum hadoop 3 support to 3.3.x for 2.6+ (#5598) Signed-off-by: Bryan Beaudreault --- dev-support/hbase-personality.sh | 11 +++++++++-- pom.xml | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 67aa2d1d168f..c7131c45e2f1 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -602,13 +602,20 @@ function hadoopcheck_rebuild else hbase_hadoop3_versions="3.1.1 3.1.2 3.1.3 3.1.4 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.3.0 3.3.1 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" fi - else - yetus_info "Setting Hadoop 3 versions to test based on branch-2.5+/master/feature branch rules" + elif [[ "${PATCH_BRANCH}" = branch-2.5 ]]; then + yetus_info "Setting Hadoop 3 versions to test based on branch-2.5 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop3_versions="3.2.4 3.3.6" else hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" fi + else + yetus_info "Setting Hadoop 3 versions to test based on branch-2.6+/master/feature branch rules" + if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then + hbase_hadoop3_versions="3.3.6" + else + hbase_hadoop3_versions="3.3.5 3.3.6" + fi fi export MAVEN_OPTS="${MAVEN_OPTS}" diff --git a/pom.xml b/pom.xml index ae6a75fbae92..7d013de887a2 100644 --- a/pom.xml +++ b/pom.xml @@ -803,7 +803,7 @@ 3.5.0 ${compileSource} - 3.2.4 + 3.3.5 ${hadoop-three.version} From 1176e92cc646111313c2fa6a7a8e9092aef1f5c9 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Sun, 7 Jan 2024 20:00:36 +0530 Subject: [PATCH 196/514] HBASE-28295 Fews tests are failing due to NCDFE: org/bouncycastle/operator/OperatorCreationException (#5608) Signed-off-by: Duo Zhang --- hbase-mapreduce/pom.xml | 5 +++++ 1 file changed, 5 insertions(+) diff 
--git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 3d9877dbf787..47e3f40363c5 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -216,6 +216,11 @@ bcprov-jdk18on test + + org.bouncycastle + bcpkix-jdk18on + test + From 71cecb0d2f3e63bf4cb4b85347753a75ffa18c00 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Mon, 8 Jan 2024 05:15:21 -0500 Subject: [PATCH 197/514] HBASE-26268 Provide coprocessor hooks for updateConfiguration and clearRegionBlockCache (#5593) Co-authored-by: Charles Connell Signed-off-by: Nick Dimiduk --- .../apache/hadoop/hbase/HBaseServerBase.java | 41 ++++++++++++++---- .../hbase/coprocessor/MasterObserver.java | 21 +++++++++ .../coprocessor/RegionServerObserver.java | 43 +++++++++++++++++++ .../apache/hadoop/hbase/master/HMaster.java | 5 +++ .../hbase/master/MasterCoprocessorHost.java | 18 ++++++++ .../hbase/regionserver/HRegionServer.java | 5 +++ .../hbase/regionserver/RSRpcServices.java | 28 +++++++----- .../RegionServerCoprocessorHost.java | 37 ++++++++++++++++ .../security/access/AccessController.java | 23 ++++++++++ .../hbase/coprocessor/TestMasterObserver.java | 34 +++++++++++++++ .../security/access/TestAccessController.java | 35 +++++++++++++++ 11 files changed, 271 insertions(+), 19 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index 36f4f3addf5c..f64f2947ee96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -46,16 +46,19 @@ import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ShutdownHook; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; @@ -184,14 +187,14 @@ public abstract class HBaseServerBase> extends protected final NettyEventLoopGroupConfig eventLoopGroupConfig; - /** - * If running on Windows, do windows-specific setup. 
- */ - private static void setupWindows(final Configuration conf, ConfigurationManager cm) { + private void setupSignalHandlers() { if (!SystemUtils.IS_OS_WINDOWS) { HBasePlatformDependent.handle("HUP", (number, name) -> { - conf.reloadConfiguration(); - cm.notifyAllObservers(conf); + try { + updateConfiguration(); + } catch (IOException e) { + LOG.error("Problem while reloading configuration", e); + } }); } } @@ -276,7 +279,7 @@ public HBaseServerBase(Configuration conf, String name) throws IOException { new ZKWatcher(conf, getProcessName() + ":" + addr.getPort(), this, canCreateBaseZNode()); this.configurationManager = new ConfigurationManager(); - setupWindows(conf, configurationManager); + setupSignalHandlers(); initializeFileSystem(); @@ -614,11 +617,31 @@ public ConfigurationManager getConfigurationManager() { /** * Reload the configuration from disk. */ - public void updateConfiguration() { + public void updateConfiguration() throws IOException { LOG.info("Reloading the configuration from disk."); // Reload the configuration from disk. + preUpdateConfiguration(); conf.reloadConfiguration(); configurationManager.notifyAllObservers(conf); + postUpdateConfiguration(); + } + + private void preUpdateConfiguration() throws IOException { + CoprocessorHost coprocessorHost = getCoprocessorHost(); + if (coprocessorHost instanceof RegionServerCoprocessorHost) { + ((RegionServerCoprocessorHost) coprocessorHost).preUpdateConfiguration(conf); + } else if (coprocessorHost instanceof MasterCoprocessorHost) { + ((MasterCoprocessorHost) coprocessorHost).preUpdateConfiguration(conf); + } + } + + private void postUpdateConfiguration() throws IOException { + CoprocessorHost coprocessorHost = getCoprocessorHost(); + if (coprocessorHost instanceof RegionServerCoprocessorHost) { + ((RegionServerCoprocessorHost) coprocessorHost).postUpdateConfiguration(conf); + } else if (coprocessorHost instanceof MasterCoprocessorHost) { + ((MasterCoprocessorHost) coprocessorHost).postUpdateConfiguration(conf); + } } @Override @@ -626,6 +649,8 @@ public String toString() { return getServerName().toString(); } + protected abstract CoprocessorHost getCoprocessorHost(); + protected abstract boolean canCreateBaseZNode(); protected abstract String getProcessName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 820fef71fd07..d0e451508b43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.MetaMutationAnnotation; @@ -1873,4 +1874,24 @@ default void preHasUserPermissions(ObserverContext default void postHasUserPermissions(ObserverContext ctx, String userName, List permissions) throws IOException { } + + /** + * Called before reloading the HMaster's {@link Configuration} from disk + * @param ctx the coprocessor instance's environment + * @param preReloadConf the {@link Configuration} in use prior to reload + * @throws IOException if you need to signal an IO error + */ + default void preUpdateMasterConfiguration(ObserverContext ctx, + Configuration preReloadConf) throws IOException { + } + + /** + * 
Called after reloading the HMaster's {@link Configuration} from disk + * @param ctx the coprocessor instance's environment + * @param postReloadConf the {@link Configuration} that was loaded + * @throws IOException if you need to signal an IO error + */ + default void postUpdateMasterConfiguration(ObserverContext ctx, + Configuration postReloadConf) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index 236667b4be7b..b6915ffaaeac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; @@ -169,4 +171,45 @@ default void postReplicationSinkBatchMutate( } + /** + * Called before clearing the block caches for one or more regions + * @param ctx the coprocessor instance's environment + * @throws IOException if you need to signal an IO error + */ + default void preClearRegionBlockCache(ObserverContext ctx) + throws IOException { + } + + /** + * Called after clearing the block caches for one or more regions + * @param ctx the coprocessor instance's environment + * @param stats statistics about the cache evictions that happened + * @throws IOException if you need to signal an IO error + */ + default void postClearRegionBlockCache(ObserverContext ctx, + CacheEvictionStats stats) throws IOException { + } + + /** + * Called before reloading the RegionServer's {@link Configuration} from disk + * @param ctx the coprocessor instance's environment + * @param preReloadConf the {@link Configuration} in use prior to reload + * @throws IOException if you need to signal an IO error + */ + default void preUpdateRegionServerConfiguration( + ObserverContext ctx, Configuration preReloadConf) + throws IOException { + } + + /** + * Called after reloading the RegionServer's {@link Configuration} from disk + * @param ctx the coprocessor instance's environment + * @param postReloadConf the {@link Configuration} that was loaded + * @throws IOException if you need to signal an IO error + */ + default void postUpdateRegionServerConfiguration( + ObserverContext ctx, Configuration postReloadConf) + throws IOException { + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8567f00cad0a..88b82f01069e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -724,6 +724,11 @@ public MasterRpcServices getMasterRpcServices() { return rpcServices; } + @Override + protected MasterCoprocessorHost getCoprocessorHost() { + return getMasterCoprocessorHost(); + } + public boolean balanceSwitch(final boolean b) throws IOException { return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 3af69b362609..e3d269973f8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -2114,4 +2114,22 @@ public void call(MasterObserver observer) throws IOException { } }); } + + public void preUpdateConfiguration(Configuration preReloadConf) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) throws IOException { + observer.preUpdateMasterConfiguration(this, preReloadConf); + } + }); + } + + public void postUpdateConfiguration(Configuration postReloadConf) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) throws IOException { + observer.postUpdateMasterConfiguration(this, postReloadConf); + } + }); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b6c573a953f1..a77fa0cd879e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -587,6 +587,11 @@ protected String getProcessName() { return REGIONSERVER; } + @Override + protected RegionServerCoprocessorHost getCoprocessorHost() { + return getRegionServerCoprocessorHost(); + } + @Override protected boolean canCreateBaseZNode() { return !clusterMode(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 4f04457e91b6..a43fac6993e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3802,19 +3802,25 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController contr @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, ClearRegionBlockCacheRequest request) throws ServiceException { - rpcPreCheck("clearRegionBlockCache"); - ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder(); - CacheEvictionStatsBuilder stats = CacheEvictionStats.builder(); - List regions = getRegions(request.getRegionList(), stats); - for (HRegion region : regions) { - try { - stats = stats.append(this.server.clearRegionBlockCache(region)); - } catch (Exception e) { - stats.addException(region.getRegionInfo().getRegionName(), e); + try { + rpcPreCheck("clearRegionBlockCache"); + ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder(); + CacheEvictionStatsBuilder stats = CacheEvictionStats.builder(); + server.getRegionServerCoprocessorHost().preClearRegionBlockCache(); + List regions = getRegions(request.getRegionList(), stats); + for (HRegion region : regions) { + try { + stats = stats.append(this.server.clearRegionBlockCache(region)); + } catch (Exception e) { + stats.addException(region.getRegionInfo().getRegionName(), e); + } } + stats.withMaxCacheSize(server.getBlockCache().map(BlockCache::getMaxSize).orElse(0L)); + 
server.getRegionServerCoprocessorHost().postClearRegionBlockCache(stats.build()); + return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build(); + } catch (IOException e) { + throw new ServiceException(e); } - stats.withMaxCacheSize(server.getBlockCache().map(BlockCache::getMaxSize).orElse(0L)); - return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build(); } private void executeOpenRegionProcedures(OpenRegionRequest request, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java index af1e923760d9..06eabdad67d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.lang.reflect.InvocationTargetException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Mutation; @@ -240,6 +241,42 @@ public void call(RegionServerObserver observer) throws IOException { }); } + public void preUpdateConfiguration(Configuration preReloadConf) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { + @Override + public void call(RegionServerObserver observer) throws IOException { + observer.preUpdateRegionServerConfiguration(this, preReloadConf); + } + }); + } + + public void postUpdateConfiguration(Configuration postReloadConf) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { + @Override + public void call(RegionServerObserver observer) throws IOException { + observer.postUpdateRegionServerConfiguration(this, postReloadConf); + } + }); + } + + public void preClearRegionBlockCache() throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { + @Override + public void call(RegionServerObserver observer) throws IOException { + observer.preClearRegionBlockCache(this); + } + }); + } + + public void postClearRegionBlockCache(CacheEvictionStats stats) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { + @Override + public void call(RegionServerObserver observer) throws IOException { + observer.postClearRegionBlockCache(this, stats); + } + }); + } + /** * Coprocessor environment extension providing access to region server related services. 
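A minimal sketch of a coprocessor that uses the new hooks; this class is not part of the patch, the name and log messages are illustrative, and it would typically be registered through hbase.coprocessor.regionserver.classes.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ConfigReloadAuditor implements RegionServerCoprocessor, RegionServerObserver {
  private static final Logger LOG = LoggerFactory.getLogger(ConfigReloadAuditor.class);

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void preUpdateRegionServerConfiguration(
    ObserverContext<RegionServerCoprocessorEnvironment> ctx, Configuration preReloadConf)
    throws IOException {
    LOG.info("Configuration reload from disk starting");
  }

  @Override
  public void postUpdateRegionServerConfiguration(
    ObserverContext<RegionServerCoprocessorEnvironment> ctx, Configuration postReloadConf)
    throws IOException {
    LOG.info("Configuration reload from disk finished");
  }

  @Override
  public void postClearRegionBlockCache(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
    CacheEvictionStats stats) throws IOException {
    LOG.info("Region block cache cleared: {}", stats);
  }
}

As the AccessController and test changes that follow show, these hooks fire when an operator calls Admin#updateConfiguration or clears region block caches, and both actions now require ADMIN permission when the AccessController coprocessor is enabled.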
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 90a51e2cb03e..66a7b3a27032 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -2576,4 +2576,27 @@ public void preUpdateRSGroupConfig(final ObserverContext ctx) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "clearRegionBlockCache", null, + Permission.Action.ADMIN); + } + + @Override + public void preUpdateRegionServerConfiguration( + ObserverContext ctx, Configuration preReloadConf) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "updateConfiguration", null, + Permission.Action.ADMIN); + } + + @Override + public void preUpdateMasterConfiguration(ObserverContext ctx, + Configuration preReloadConf) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "updateConfiguration", null, + Permission.Action.ADMIN); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 9e65e58a56d2..cf647b9fe8a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -191,6 +191,8 @@ public static class CPMasterObserver implements MasterCoprocessor, MasterObserve private boolean postLockHeartbeatCalled; private boolean preMasterStoreFlushCalled; private boolean postMasterStoreFlushCalled; + private boolean preUpdateMasterConfigurationCalled; + private boolean postUpdateMasterConfigurationCalled; public void resetStates() { preCreateTableRegionInfosCalled = false; @@ -284,6 +286,8 @@ public void resetStates() { postLockHeartbeatCalled = false; preMasterStoreFlushCalled = false; postMasterStoreFlushCalled = false; + preUpdateMasterConfigurationCalled = false; + postUpdateMasterConfigurationCalled = false; } @Override @@ -1264,6 +1268,17 @@ public void postRollBackMergeRegionsAction( throws IOException { } + @Override + public void preUpdateMasterConfiguration(ObserverContext ctx, + Configuration preReloadConf) throws IOException { + preUpdateMasterConfigurationCalled = true; + } + + @Override + public void postUpdateMasterConfiguration(ObserverContext ctx, + Configuration postReloadConf) throws IOException { + postUpdateMasterConfigurationCalled = true; + } } private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -1715,4 +1730,23 @@ public void testMasterStoreOperations() throws Exception { assertTrue("Master store flush called", cp.postMasterStoreFlushCalled); } } + + @Test + public void testUpdateConfiguration() throws Exception { + SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster(); + HMaster master = cluster.getMaster(); + MasterCoprocessorHost host = master.getMasterCoprocessorHost(); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); + cp.resetStates(); + assertFalse("No update configuration call", cp.preUpdateMasterConfigurationCalled); + assertFalse("No update configuration call", cp.postUpdateMasterConfigurationCalled); + + try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); + Admin admin = connection.getAdmin()) { + 
admin.updateConfiguration(); + + assertTrue("Update configuration called", cp.preUpdateMasterConfigurationCalled); + assertTrue("Update configuration called", cp.postUpdateMasterConfigurationCalled); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 11ae3b3ecf09..cc895d21ac61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -3086,6 +3086,41 @@ public Object run() throws Exception { verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); } + @Test + public void testUpdateMasterConfiguration() throws Exception { + AccessTestAction action = () -> { + ACCESS_CONTROLLER.preUpdateMasterConfiguration(ObserverContextImpl.createAndPrepare(CP_ENV), + null); + return null; + }; + + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testUpdateRegionServerConfiguration() throws Exception { + AccessTestAction action = () -> { + ACCESS_CONTROLLER + .preUpdateRegionServerConfiguration(ObserverContextImpl.createAndPrepare(RSCP_ENV), null); + return null; + }; + + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testClearRegionBlockCache() throws Exception { + AccessTestAction action = () -> { + ACCESS_CONTROLLER.preClearRegionBlockCache(ObserverContextImpl.createAndPrepare(RSCP_ENV)); + return null; + }; + + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + @Test public void testTransitSyncReplicationPeerState() throws Exception { AccessTestAction action = new AccessTestAction() { From 0916c72929e238735e3dc3ec09dbf935e8d6ad69 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 9 Jan 2024 16:50:29 +0100 Subject: [PATCH 198/514] HBASE-28261 Sync jvm11 module flags from hbase-surefire.jdk11.flags to bin/hbase (#5610) Signed-off-by: Nihal Jain Signed-off-by: Peter Somogyi --- bin/hbase | 3 ++- pom.xml | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/bin/hbase b/bin/hbase index e329c5070899..81379eaa587d 100755 --- a/bin/hbase +++ b/bin/hbase @@ -496,7 +496,8 @@ add_jdk11_deps_to_classpath() { } add_jdk11_jvm_flags() { - HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" + # Keep in sync with hbase-surefire.jdk11.flags in the root pom.xml + HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-opens java.base/java.util=ALL-UNNAMED --add-opens java.base/java.util.concurrent=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" } add_opentelemetry_agent() { diff --git a/pom.xml b/pom.xml index 7d013de887a2..6788884c2111 100644 --- a/pom.xml +++ b/pom.xml @@ -971,8 +971,11 @@ "-Djava.library.path=${hadoop.library.path};${java.library.path}" -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.opentelemetry.context.enableStrictContext=true + -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported + --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED @@ -984,6 +987,8 @@ --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED + --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED ${hbase-surefire.argLine} @{jacocoArgLine} From a09305d5854fc98300426271fad3b53a69d2ae71 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 10 Jan 2024 11:13:09 +0800 Subject: [PATCH 199/514] HBASE-28297 IntegrationTestImportTsv fails with ArrayIndexOfOutBounds (#5612) Signed-off-by: Nihal Jain --- .../hadoop/hbase/mapreduce/IntegrationTestImportTsv.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index 35db989dd691..2e9ff8279365 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -60,6 +60,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** * Validate ImportTsv + BulkLoadFiles on a distributed cluster. 
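The hunk below adds an empty-line guard around the TSV parsing. As a standalone illustration (not from the patch) of the failure it prevents: Guava's Splitter keeps trailing empty strings, so a TSV blob ending with a newline yields a final empty entry, and splitting that entry on tabs leaves nothing at index 1, hence the ArrayIndexOutOfBoundsException in the issue title.

import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
import org.apache.hbase.thirdparty.com.google.common.base.Strings;

public class TrailingNewlineSketch {
  public static void main(String[] args) {
    String tsv = "row1\t1\tvalue\nrow2\t2\tvalue\n"; // note the trailing newline
    for (String line : Splitter.on('\n').split(tsv)) {
      if (Strings.isNullOrEmpty(line)) {
        continue; // same guard as the hunk below; without it, line.split("\t")[1] would throw
      }
      String[] fields = line.split("\t");
      System.out.println(fields[0] + " @ ts=" + Long.parseLong(fields[1]));
    }
  }
}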
@@ -85,6 +86,9 @@ public class IntegrationTestImportTsv extends Configured implements Tool { { byte[] family = Bytes.toBytes("d"); for (String line : Splitter.on('\n').split(simple_tsv)) { + if (Strings.isNullOrEmpty(line)) { + continue; + } String[] row = line.split("\t"); byte[] key = Bytes.toBytes(row[0]); long ts = Long.parseLong(row[1]); From 0db423101702b277b571f465f598aab3bfcce736 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Wed, 10 Jan 2024 17:13:42 +0530 Subject: [PATCH 200/514] [ADDENDUM] HBASE-28295 Few tests are failing due to NCDFE: org/bouncycastle/operator/OperatorCreationException (#5611) Signed-off-by: Duo Zhang --- hbase-backup/pom.xml | 6 ++++++ hbase-it/pom.xml | 6 ++++++ hbase-mapreduce/pom.xml | 1 + 3 files changed, 13 insertions(+) diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml index 478c8d8245ea..77d033d67fe6 100644 --- a/hbase-backup/pom.xml +++ b/hbase-backup/pom.xml @@ -105,6 +105,12 @@ + + + org.bouncycastle + bcpkix-jdk18on + test + org.apache.commons diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index c3f884a16b0d..dbf34c404155 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -104,6 +104,12 @@ + + + org.bouncycastle + bcpkix-jdk18on + test + org.apache.hbase hbase-backup diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 47e3f40363c5..798250600e35 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -216,6 +216,7 @@ bcprov-jdk18on test + org.bouncycastle bcpkix-jdk18on From 8cab5a2dfb79304440d7c92a0d7170cd0f3e0739 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 11 Jan 2024 07:54:10 -0500 Subject: [PATCH 201/514] HBASE-28304 Add hbase-shaded-testing-util version to dependencyManagement (#5618) Signed-off-by: Duo Zhang --- hbase-shaded/hbase-shaded-testing-util-tester/pom.xml | 1 - pom.xml | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml index 928a3276df65..441d38ccd47f 100644 --- a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml +++ b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml @@ -81,7 +81,6 @@ org.apache.hbase hbase-shaded-testing-util - ${project.version} test diff --git a/pom.xml b/pom.xml index 6788884c2111..a59379f8dc92 100644 --- a/pom.xml +++ b/pom.xml @@ -1284,6 +1284,11 @@ hbase-shaded-mapreduce ${project.version} + + org.apache.hbase + hbase-shaded-testing-util + ${project.version} + org.apache.hbase hbase-asyncfs From a7429a75f22e2228154eb30383fbffee86a2f103 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Thu, 11 Jan 2024 23:44:35 +0530 Subject: [PATCH 202/514] HBASE-28301 IntegrationTestImportTsv fails with UnsupportedOperationException (#5613) Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- .../hbase/mapreduce/IntegrationTestImportTsv.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index 2e9ff8279365..e5c1fbed1a56 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -145,12 +145,10 @@ protected void doLoadIncrementalHFiles(Path hfiles, TableName tableName) throws ToolRunner.run(new BulkLoadHFilesTool(getConf()), args)); Table table = null; - Scan scan = new Scan() { - { - 
setCacheBlocks(false); - setCaching(1000); - } - }; + Scan scan = new Scan(); + scan.setCacheBlocks(false); + scan.setCaching(1000); + try { table = util.getConnection().getTable(tableName); Iterator resultsIt = table.getScanner(scan).iterator(); From d57e4e238d6255a634762bd5f41db6d84803f703 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Jan 2024 14:50:29 +0800 Subject: [PATCH 203/514] HBASE-28308 Bump gitpython from 3.1.37 to 3.1.41 in /dev-support/flaky-tests (#5616) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.37 to 3.1.41. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.37...3.1.41) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 9ca8c66afb1a..0e09594aec8a 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -17,6 +17,6 @@ # requests==2.31.0 future==0.18.3 -gitpython==3.1.37 +gitpython==3.1.41 rbtools==4.0 jinja2==3.1.2 From 8ae77e2908cf1ce56d6b06f6492cfd731a8962ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Jan 2024 14:51:28 +0800 Subject: [PATCH 204/514] HBASE-28309 Bump gitpython in /dev-support/git-jira-release-audit (#5617) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.37 to 3.1.41. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.37...3.1.41) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 23a4b916fd4b..99fb2e2d0de9 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -23,7 +23,7 @@ cryptography==41.0.6 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 -GitPython==3.1.37 +GitPython==3.1.41 idna==2.8 jira==2.0.0 oauthlib==3.1.0 From 6133d74318fc61f83faa865c4b307aa5cc7a8c29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Jan 2024 14:52:56 +0800 Subject: [PATCH 205/514] HBASE-28310 Bump jinja2 from 3.1.2 to 3.1.3 in /dev-support/flaky-tests (#5619) Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.2 to 3.1.3. 
- [Release notes](https://github.com/pallets/jinja/releases) - [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/jinja/compare/3.1.2...3.1.3) --- updated-dependencies: - dependency-name: jinja2 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 0e09594aec8a..8ef087b0036d 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -19,4 +19,4 @@ requests==2.31.0 future==0.18.3 gitpython==3.1.41 rbtools==4.0 -jinja2==3.1.2 +jinja2==3.1.3 From 4d2b8f8de668f0b5938c7665c9730133cd48b8da Mon Sep 17 00:00:00 2001 From: liuwenjing17 <154434091+liuwenjing17@users.noreply.github.com> Date: Sat, 13 Jan 2024 17:39:28 +0800 Subject: [PATCH 206/514] HBASE-28287 MOB HFiles are expired earlier than their reference data (#5599) Co-authored-by: liuwenjing3 Signed-off-by: Duo Zhang --- .../src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java | 1 + .../org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index e04d67a0aaaf..60f0f126ab60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -280,6 +280,7 @@ public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, Table calendar.set(Calendar.HOUR_OF_DAY, 0); calendar.set(Calendar.MINUTE, 0); calendar.set(Calendar.SECOND, 0); + calendar.set(Calendar.MILLISECOND, 0); Date expireDate = calendar.getTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index f282c6f9d8f4..4bbc88681290 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -140,7 +140,8 @@ public void testCleaner() throws Exception { assertEquals("Before cleanup without delay 1", 1, firstFiles.length); String firstFile = firstFiles[0].getPath().getName(); - ts = EnvironmentEdgeManager.currentTime() - 1 * secondsOfDay() * 1000; // 1 day before + // 1.5 day before + ts = (long) (EnvironmentEdgeManager.currentTime() - 1.5 * secondsOfDay() * 1000); putKVAndFlush(table, row2, dummyData, ts); FileStatus[] secondFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); // now there are 2 mob files From e3a0174e20542e661f787a874870b629a274daf5 Mon Sep 17 00:00:00 2001 From: haosen chen <99318736+haosenchen@users.noreply.github.com> Date: Sat, 13 Jan 2024 17:42:57 +0800 Subject: [PATCH 207/514] HBASE-28305 Add "Uncompressed StoreFileSize" column to the table.jsp (#5620) Co-authored-by: Haosen Chen Signed-off-by: Duo Zhang --- .../resources/hbase-webapps/master/table.jsp | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 1d48a7561e1b..f0599b7aa64a 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -315,6 +315,7 @@ Region Server ReadRequests WriteRequests + Uncompressed StoreFileSize StorefileSize Num.Storefiles MemSize @@ -338,6 +339,7 @@ String hostAndPort = ""; String readReq = "N/A"; String writeReq = "N/A"; + String fileSizeUncompressed = ZEROMB; String fileSize = ZEROMB; String fileCount = "N/A"; String memSize = ZEROMB; @@ -356,6 +358,10 @@ if (rSize > 0) { fileSize = StringUtils.byteDesc((long) rSize); } + double rSizeUncompressed = load.getUncompressedStoreFileSize().get(Size.Unit.BYTE); + if (rSizeUncompressed > 0) { + fileSizeUncompressed = StringUtils.byteDesc((long) rSizeUncompressed); + } fileCount = String.format("%,1d", load.getStoreFileCount()); double mSize = load.getMemStoreSize().get(Size.Unit.BYTE); if (mSize > 0) { @@ -370,6 +376,7 @@ <%= StringEscapeUtils.escapeHtml4(hostAndPort) %> <%= readReq%> <%= writeReq%> + <%= fileSizeUncompressed%> <%= fileSize%> <%= fileCount%> <%= memSize%> @@ -834,6 +841,7 @@ <% long totalReadReq = 0; long totalWriteReq = 0; + long totalSizeUncompressed = 0; long totalSize = 0; long totalStoreFileCount = 0; long totalMemSize = 0; @@ -844,6 +852,7 @@ long totalBlocksLocalWithSsdWeight = 0; String totalCompactionProgress = ""; String totalMemSizeStr = ZEROMB; + String totalSizeUncompressedStr = ZEROMB; String totalSizeStr = ZEROMB; String totalLocality = ""; String totalLocalityForSsd = ""; @@ -865,6 +874,7 @@ if (regionMetrics != null) { totalReadReq += regionMetrics.getReadRequestCount(); totalWriteReq += regionMetrics.getWriteRequestCount(); + totalSizeUncompressed += regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); totalSize += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE); totalStoreFileCount += regionMetrics.getStoreFileCount(); totalMemSize += regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE); @@ -890,6 +900,9 @@ if (totalSize > 0) { totalSizeStr = StringUtils.byteDesc(totalSize*1024l*1024); } + if (totalSizeUncompressed > 0){ + totalSizeUncompressedStr = StringUtils.byteDesc(totalSizeUncompressed*1024l*1024); + } if (totalMemSize > 0) { totalMemSizeStr = StringUtils.byteDesc(totalMemSize*1024l*1024); } @@ -920,6 +933,7 @@ Region Server ReadRequests
(<%= String.format("%,1d", totalReadReq)%>) WriteRequests
(<%= String.format("%,1d", totalWriteReq)%>) + Uncompressed StoreFileSize
(<%= totalSizeUncompressedStr %>) StorefileSize
(<%= totalSizeStr %>) Num.Storefiles
(<%= String.format("%,1d", totalStoreFileCount)%>) MemSize
(<%= totalMemSizeStr %>) @@ -944,6 +958,7 @@ RegionMetrics load = hriEntry.getValue(); String readReq = "N/A"; String writeReq = "N/A"; + String regionSizeUncompressed = ZEROMB; String regionSize = ZEROMB; String fileCount = "N/A"; String memSize = ZEROMB; @@ -951,6 +966,10 @@ if (load != null) { readReq = String.format("%,1d", load.getReadRequestCount()); writeReq = String.format("%,1d", load.getWriteRequestCount()); + double rSizeUncompressed = load.getUncompressedStoreFileSize().get(Size.Unit.BYTE); + if (rSizeUncompressed > 0) { + regionSizeUncompressed = StringUtils.byteDesc((long)rSizeUncompressed); + } double rSize = load.getStoreFileSize().get(Size.Unit.BYTE); if (rSize > 0) { regionSize = StringUtils.byteDesc((long)rSize); @@ -987,6 +1006,7 @@ <%= buildRegionDeployedServerTag(regionInfo, master, regionsToServer) %> <%= readReq%> <%= writeReq%> + <%= regionSizeUncompressed%> <%= regionSize%> <%= fileCount%> <%= memSize%> From a683fcfbe8c5c84e58fa8670f4414f9d01ff43ed Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Sun, 14 Jan 2024 11:59:26 +0530 Subject: [PATCH 208/514] HBASE-27814 Add support for dump and process metrics servlet in REST InfoServer (#5215) Other changes: - Ensure info server stops during stop() - Extract header and footer. This would fix the log level page layout for rest web UI (See HBASE-20693) - Add hostname in the landing page instead of just port similar to other web UIs Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/rest/RESTDumpServlet.java | 80 ++++++++ .../apache/hadoop/hbase/rest/RESTServer.java | 39 +++- .../resources/hbase-webapps/rest/footer.jsp | 32 +++ .../resources/hbase-webapps/rest/header.jsp | 74 +++++++ .../hbase-webapps/rest/processRest.jsp | 184 ++++++++++++++++++ .../resources/hbase-webapps/rest/rest.jsp | 80 ++------ 6 files changed, 422 insertions(+), 67 deletions(-) create mode 100644 hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTDumpServlet.java create mode 100644 hbase-rest/src/main/resources/hbase-webapps/rest/footer.jsp create mode 100644 hbase-rest/src/main/resources/hbase-webapps/rest/header.jsp create mode 100644 hbase-rest/src/main/resources/hbase-webapps/rest/processRest.jsp diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTDumpServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTDumpServlet.java new file mode 100644 index 000000000000..8bb306f7829a --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTDumpServlet.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.util.Date; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.http.HttpServer; +import org.apache.hadoop.hbase.monitoring.StateDumpServlet; +import org.apache.hadoop.hbase.util.LogMonitoring; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class RESTDumpServlet extends StateDumpServlet { + private static final long serialVersionUID = 1L; + private static final String LINE = "==========================================================="; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { + return; + } + + RESTServer restServer = (RESTServer) getServletContext().getAttribute(RESTServer.REST_SERVER); + assert restServer != null : "No REST Server in context!"; + + response.setContentType("text/plain"); + OutputStream os = response.getOutputStream(); + try (PrintWriter out = new PrintWriter(os)) { + + out.println("REST Server status for " + restServer.getServerName() + " as of " + new Date()); + + out.println("\n\nVersion Info:"); + out.println(LINE); + dumpVersionInfo(out); + + out.println("\n\nStacks:"); + out.println(LINE); + out.flush(); + PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8"); + Threads.printThreadInfo(ps, ""); + ps.flush(); + + out.println("\n\nREST Server configuration:"); + out.println(LINE); + Configuration conf = restServer.conf; + out.flush(); + conf.writeXml(os); + os.flush(); + + out.println("\n\nLogs"); + out.println(LINE); + long tailKb = getTailKbParam(request); + LogMonitoring.dumpTailOfLogs(out, tailKb); + + out.flush(); + } + } +} diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 886c81dc6680..42c00480526b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.rest; import java.lang.management.ManagementFactory; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; @@ -29,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.http.ClickjackingPreventionFilter; import org.apache.hadoop.hbase.http.HttpServerUtil; import org.apache.hadoop.hbase.http.InfoServer; @@ -83,6 +85,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class RESTServer implements Constants { static Logger LOG = LoggerFactory.getLogger("RESTServer"); + public static final String REST_SERVER = "rest"; static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; static final boolean REST_CSRF_ENABLED_DEFAULT = false; @@ -112,6 +115,7 @@ public class RESTServer implements Constants { private final UserProvider userProvider; private Server server; private InfoServer infoServer; + private ServerName serverName; 
public RESTServer(Configuration conf) { RESTServer.conf = conf; @@ -163,8 +167,7 @@ private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configur loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { Class containerClass = ServletContainer.class; if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { - String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( - conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = getHostName(conf); String keytabFilename = conf.get(REST_KEYTAB_FILE); Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), REST_KEYTAB_FILE + " should be set if security is enabled"); @@ -402,9 +405,14 @@ public synchronized void run() throws Exception { // Put up info server. int port = conf.getInt("hbase.rest.info.port", 8085); if (port >= 0) { - conf.setLong("startcode", EnvironmentEdgeManager.currentTime()); - String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0"); - this.infoServer = new InfoServer("rest", a, port, false, conf); + final long startCode = EnvironmentEdgeManager.currentTime(); + conf.setLong("startcode", startCode); + this.serverName = ServerName.valueOf(getHostName(conf), servicePort, startCode); + + String addr = conf.get("hbase.rest.info.bindAddress", "0.0.0.0"); + this.infoServer = new InfoServer(REST_SERVER, addr, port, false, conf); + this.infoServer.addPrivilegedServlet("dump", "/dump", RESTDumpServlet.class); + this.infoServer.setAttribute(REST_SERVER, this); this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } @@ -412,6 +420,11 @@ public synchronized void run() throws Exception { server.start(); } + private static String getHostName(Configuration conf) throws UnknownHostException { + return Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); + } + public synchronized void join() throws Exception { if (server == null) { throw new IllegalStateException("Server is not running"); @@ -419,7 +432,19 @@ public synchronized void join() throws Exception { server.join(); } + private void stopInfoServer() { + if (this.infoServer != null) { + LOG.info("Stop info server"); + try { + this.infoServer.stop(); + } catch (Exception e) { + LOG.error("Failed to stop infoServer", e); + } + } + } + public synchronized void stop() throws Exception { + stopInfoServer(); if (server == null) { throw new IllegalStateException("Server is not running"); } @@ -443,6 +468,10 @@ public synchronized int getInfoPort() { return infoServer.getPort(); } + public ServerName getServerName() { + return serverName; + } + public Configuration getConf() { return conf; } diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/footer.jsp b/hbase-rest/src/main/resources/hbase-webapps/rest/footer.jsp new file mode 100644 index 000000000000..a642ac36eff7 --- /dev/null +++ b/hbase-rest/src/main/resources/hbase-webapps/rest/footer.jsp @@ -0,0 +1,32 @@ +<%-- +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +--%> + + + + + + + + diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/header.jsp b/hbase-rest/src/main/resources/hbase-webapps/rest/header.jsp new file mode 100644 index 000000000000..67f7656de592 --- /dev/null +++ b/hbase-rest/src/main/resources/hbase-webapps/rest/header.jsp @@ -0,0 +1,74 @@ +<%-- +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.hbase.HBaseConfiguration"%> + + + + + + + <%= request.getParameter("pageTitle")%> + + + + + + + + + +

+ diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/processRest.jsp b/hbase-rest/src/main/resources/hbase-webapps/rest/processRest.jsp new file mode 100644 index 000000000000..2b2d35fbfb3f --- /dev/null +++ b/hbase-rest/src/main/resources/hbase-webapps/rest/processRest.jsp @@ -0,0 +1,184 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.Date" + import="java.util.List" + import="javax.management.ObjectName" + import="java.lang.management.ManagementFactory" + import="java.lang.management.MemoryPoolMXBean" + import="java.lang.management.RuntimeMXBean" + import="java.lang.management.GarbageCollectorMXBean" + import="org.apache.hadoop.hbase.util.JSONMetricUtil" + import="org.apache.hadoop.hbase.procedure2.util.StringUtils" + import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix" +%> + +<% +RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean(); +ObjectName jvmMetrics = new ObjectName("Hadoop:service=HBase,name=JvmMetrics"); + +// There is always two of GC collectors +List gcBeans = JSONMetricUtil.getGcCollectorBeans(); +GarbageCollectorMXBean collector1 = null; +GarbageCollectorMXBean collector2 = null; +try { +collector1 = gcBeans.get(0); +collector2 = gcBeans.get(1); +} catch(IndexOutOfBoundsException e) {} +List mPools = JSONMetricUtil.getMemoryPools(); +pageContext.setAttribute("pageTitle", "Process info for PID: " + JSONMetricUtil.getProcessPID()); +%> + + + + + +
+
+ +
+ + + + + + + + + + + + + + +
StartedUptimePIDOwner
<%= new Date(runtimeBean.getStartTime()) %><%= StringUtils.humanTimeDiff(runtimeBean.getUptime()) %><%= JSONMetricUtil.getProcessPID() %><%= runtimeBean.getSystemProperties().get("user.name") %>
+
+
+
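The process table above is filled from the JVM's RuntimeMXBean, plus the JSONMetricUtil helper for the PID. A standalone sketch of the same lookups using only standard JDK APIs, shown as an illustration rather than the helper's actual implementation, is:

import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.util.Date;

public class ProcessInfoExample {
  public static void main(String[] args) {
    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
    // RuntimeMXBean.getName() is typically of the form "pid@hostname"; the
    // JSONMetricUtil PID helper used above is assumed to rely on a similar lookup.
    System.out.println("Started: " + new Date(runtime.getStartTime()));
    System.out.println("Uptime (ms): " + runtime.getUptime());
    System.out.println("Name (pid@host): " + runtime.getName());
    System.out.println("Owner: " + runtime.getSystemProperties().get("user.name"));
  }
}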
+ +
+ + + + + + + + + + + + + + + + + +
 ThreadsNewThreadsRunnableThreadsBlockedThreadsWaitingThreadsTimedWaitingThreadsTerminated
<%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsNew") %><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsRunnable")%><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsBlocked")%><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsWaiting")%><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsTimedWaiting")%><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsTerminated")%>
+
+
+
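The thread counters rendered above are read from the Hadoop metrics MBean registered under Hadoop:service=HBase,name=JvmMetrics. A rough equivalent of the getValueFromMBean calls, using the platform MBean server directly and assuming that bean has been registered in the current JVM (as it is inside the REST server process), is:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JvmMetricsExample {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName jvmMetrics = new ObjectName("Hadoop:service=HBase,name=JvmMetrics");
    // getAttribute throws InstanceNotFoundException unless the Hadoop metrics
    // system has registered JvmMetrics in this JVM, as it does in the REST server.
    for (String attr : new String[] { "ThreadsNew", "ThreadsRunnable", "ThreadsBlocked",
        "ThreadsWaiting", "ThreadsTimedWaiting", "ThreadsTerminated" }) {
      System.out.println(attr + " = " + server.getAttribute(jvmMetrics, attr));
    }
  }
}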
+ +
+ <% if (gcBeans.size() == 2) { %> +
+ +
+
+ + + + + + + + + + + +
Collection CountCollection TimeLast duration
<%= collector1.getCollectionCount() %> <%= StringUtils.humanTimeDiff(collector1.getCollectionTime()) %> <%= StringUtils.humanTimeDiff(JSONMetricUtil.getLastGcDuration( + collector1.getObjectName())) %>
+
+
+ + + + + + + + + + + +
Collection CountCollection TimeLast duration
<%= collector2.getCollectionCount() %> <%= StringUtils.humanTimeDiff(collector2.getCollectionTime()) %> <%= StringUtils.humanTimeDiff(JSONMetricUtil.getLastGcDuration( + collector2.getObjectName())) %>
+
+
+
+ <%} else { %> +

 Cannot display GC Collector stats.

+ <%} %> + Total GC Collection time: <%= StringUtils.humanTimeDiff(collector1.getCollectionTime() + + collector2.getCollectionTime())%> +
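The scriptlet above indexes collector1 and collector2 from JSONMetricUtil.getGcCollectorBeans() on the assumption that exactly two collectors are present. A sketch that enumerates whatever collectors the running JVM actually reports, via the standard GarbageCollectorMXBean API, is:

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class GcStatsExample {
  public static void main(String[] args) {
    // Most JVM configurations expose two collectors (young and old generation),
    // but the count is not guaranteed, so iterate instead of indexing.
    for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
      System.out.println(gc.getName() + ": collections=" + gc.getCollectionCount()
        + ", totalTimeMs=" + gc.getCollectionTime());
    }
  }
}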
+<% for(MemoryPoolMXBean mp:mPools) { +if(mp.getName().contains("Cache")) continue;%> +
+
+ +
+ + + + + + + + + + + + + + + + +
 CommittedInitMaxUsedUtilization [%]
<%= TraditionalBinaryPrefix.long2String(mp.getUsage().getCommitted(), "B", 1) %><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getInit(), "B", 1) %><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getMax(), "B", 1) %><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getUsed(), "B", 1) %><%= JSONMetricUtil.calcPercentage(mp.getUsage().getUsed(), + mp.getUsage().getCommitted()) %>
+
+<% } %> + + diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp index df8f0838d6cc..ce6725f283a7 100644 --- a/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp +++ b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp @@ -18,70 +18,29 @@ */ --%> <%@ page contentType="text/html;charset=UTF-8" - import="org.apache.hadoop.conf.Configuration" - import="org.apache.hadoop.hbase.HBaseConfiguration" - import="org.apache.hadoop.hbase.rest.model.VersionModel" - import="org.apache.hadoop.hbase.util.VersionInfo" - import="java.util.Date"%> + import="org.apache.hadoop.conf.Configuration" + import="org.apache.hadoop.hbase.rest.RESTServer" + import="org.apache.hadoop.hbase.rest.model.VersionModel" + import="org.apache.hadoop.hbase.util.VersionInfo" + import="java.util.Date"%> + <% -Configuration conf = (Configuration)getServletContext().getAttribute("hbase.conf"); -long startcode = conf.getLong("startcode", System.currentTimeMillis()); -String listenPort = conf.get("hbase.rest.port", "8080"); -%> - - - - - - HBase REST Server: <%= listenPort %> - - + Configuration conf = (Configuration) getServletContext().getAttribute("hbase.conf"); + long startcode = conf.getLong("startcode", System.currentTimeMillis()); - - - - + final RESTServer restServer = (RESTServer) getServletContext().getAttribute(RESTServer.REST_SERVER); + final String hostName = restServer.getServerName().getHostname(); + pageContext.setAttribute("pageTitle", "HBase REST Server" + hostName); +%> - - + + +
@@ -124,9 +83,6 @@ String listenPort = conf.get("hbase.rest.port", "8080");
- - - - - + + From 7016c640960dd875f2daf78601dc68bfe14a1219 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Sun, 14 Jan 2024 17:18:54 -0500 Subject: [PATCH 209/514] HBASE-28307 Add hbase-openssl module and include in release binaries (#5623) Signed-off-by: Duo Zhang Signed-off-by: Nihal Jain --- hbase-assembly/pom.xml | 4 + .../hadoop/hbase/io/crypto/tls/X509Util.java | 3 +- hbase-extensions/hbase-openssl/pom.xml | 43 ++++++++ hbase-extensions/pom.xml | 99 +++++++++++++++++++ pom.xml | 11 +++ 5 files changed, 159 insertions(+), 1 deletion(-) create mode 100644 hbase-extensions/hbase-openssl/pom.xml create mode 100644 hbase-extensions/pom.xml diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 6a317045109b..a6e88cdbf77e 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -61,6 +61,10 @@ compile + + org.apache.hbase + hbase-openssl + org.apache.hbase hbase-server diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java index fff32866fb9a..46809050b5a3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java @@ -299,7 +299,8 @@ public static SslContext createSslContextForClient(Configuration config) * Adds SslProvider.OPENSSL if OpenSsl is available and enabled. In order to make it available, * one must ensure that a properly shaded netty-tcnative is on the classpath. Properly shaded * means relocated to be prefixed with "org.apache.hbase.thirdparty" like the rest of the netty - * classes. + * classes. We make available org.apache.hbase:hbase-openssl as a convenience module which one can + * use to pull in a shaded netty-tcnative statically linked against boringssl. 
*/ private static boolean configureOpenSslIfAvailable(SslContextBuilder sslContextBuilder, Configuration conf) { diff --git a/hbase-extensions/hbase-openssl/pom.xml b/hbase-extensions/hbase-openssl/pom.xml new file mode 100644 index 000000000000..7158c2f4197e --- /dev/null +++ b/hbase-extensions/hbase-openssl/pom.xml @@ -0,0 +1,43 @@ + + + + 4.0.0 + + org.apache.hbase + hbase-extensions + ${revision} + ../pom.xml + + + hbase-openssl + jar + Apache HBase - OpenSSL support for TLS RPC + Includes tcnative bindings so that netty TLS can use OpenSSL + + + + org.apache.hbase.thirdparty + hbase-shaded-netty-tcnative + runtime + + + + diff --git a/hbase-extensions/pom.xml b/hbase-extensions/pom.xml new file mode 100644 index 000000000000..8a11e7754ea2 --- /dev/null +++ b/hbase-extensions/pom.xml @@ -0,0 +1,99 @@ + + + + 4.0.0 + + org.apache.hbase + hbase-build-configuration + ${revision} + ../hbase-build-configuration + + + hbase-extensions + pom + Apache HBase - Extensions + Parent for optional extension modules + + + hbase-openssl + + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${project.basedir}/../../dev-support/spotbugs-exclude.xml + true + true + Max + + + + + maven-assembly-plugin + + true + + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + + + spotbugs + + false + + ${project.basedir}/../dev-support/spotbugs-exclude.xml + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + + maven-assembly-plugin + + true + + + + + + diff --git a/pom.xml b/pom.xml index a59379f8dc92..a32e73fb3377 100644 --- a/pom.xml +++ b/pom.xml @@ -758,6 +758,7 @@ hbase-asyncfs hbase-logging hbase-compression + hbase-extensions scm:git:git://gitbox.apache.org/repos/asf/hbase.git @@ -1331,6 +1332,11 @@ hbase-compression-zstd ${project.version} + + org.apache.hbase + hbase-openssl + ${project.version} + com.github.stephenc.findbugs @@ -1749,6 +1755,11 @@ hbase-shaded-netty ${hbase-thirdparty.version} + + org.apache.hbase.thirdparty + hbase-shaded-netty-tcnative + ${hbase-thirdparty.version} + org.apache.hbase.thirdparty hbase-shaded-protobuf From 3cfc92fa427441f1991df6ce5e6b95618710ea91 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 16 Jan 2024 12:02:01 +0800 Subject: [PATCH 210/514] HBASE-28265 Add 3.0.0-beta-1 to download page (#5624) Signed-off-by: Yi Mei Signed-off-by: Xin Sun --- src/site/xdoc/downloads.xml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index dc7a3148b579..e9e9d97ebaed 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,27 +45,27 @@ under the License. - 3.0.0-alpha-4 + 3.0.0-beta-1 - 2023/06/07 + 2024/01/14 - 3.0.0-alpha-4 vs 2.0.0 + 3.0.0-beta-1 vs 2.0.0 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc) - Testing only, not production ready + Feature freeze, passed a 10B ITBLL run, use with caution From f5fd60ea54f25e9d56c8b6d0e3783c42b169dfef Mon Sep 17 00:00:00 2001 From: jbewing Date: Tue, 16 Jan 2024 08:59:05 -0500 Subject: [PATCH 211/514] HBASE-28256 Enhance ByteBufferUtils.readVLong to read more bytes at a time (#5576) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang --- .../hadoop/hbase/util/ByteBufferUtils.java | 83 ++++++++++++++----- .../hbase/util/TestByteBufferUtils.java | 18 ++++ 2 files changed, 78 insertions(+), 23 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index 054de74d7d1e..d6b936323404 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -468,38 +468,75 @@ public static void writeVLong(ByteBuffer out, long i) { } } - private interface ByteVisitor { - byte get(); - } - - private static long readVLong(ByteVisitor visitor) { - byte firstByte = visitor.get(); + /** + * Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a + * {@link ByteBuff}. + */ + public static long readVLong(ByteBuff buf) { + byte firstByte = buf.get(); int len = WritableUtils.decodeVIntSize(firstByte); if (len == 1) { return firstByte; + } else { + int remaining = len - 1; + long i = 0; + int offsetFromPos = 0; + if (remaining >= Bytes.SIZEOF_INT) { + // The int read has to be converted to unsigned long so the & op + i = (buf.getIntAfterPosition(offsetFromPos) & 0x00000000ffffffffL); + remaining -= Bytes.SIZEOF_INT; + offsetFromPos += Bytes.SIZEOF_INT; + } + if (remaining >= Bytes.SIZEOF_SHORT) { + short s = buf.getShortAfterPosition(offsetFromPos); + i = i << 16; + i = i | (s & 0xFFFF); + remaining -= Bytes.SIZEOF_SHORT; + offsetFromPos += Bytes.SIZEOF_SHORT; + } + for (int idx = 0; idx < remaining; idx++) { + byte b = buf.getByteAfterPosition(offsetFromPos + idx); + i = i << 8; + i = i | (b & 0xFF); + } + buf.skip(len - 1); + return WritableUtils.isNegativeVInt(firstByte) ? ~i : i; } - long i = 0; - for (int idx = 0; idx < len - 1; idx++) { - byte b = visitor.get(); - i = i << 8; - i = i | (b & 0xFF); - } - return (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i); } /** * Similar to {@link WritableUtils#readVLong(DataInput)} but reads from a {@link ByteBuffer}. */ - public static long readVLong(ByteBuffer in) { - return readVLong(in::get); - } - - /** - * Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a - * {@link ByteBuff}. 
- */ - public static long readVLong(ByteBuff in) { - return readVLong(in::get); + public static long readVLong(ByteBuffer buf) { + byte firstByte = buf.get(); + int len = WritableUtils.decodeVIntSize(firstByte); + if (len == 1) { + return firstByte; + } else { + int remaining = len - 1; + long i = 0; + int offsetFromPos = 0; + if (remaining >= Bytes.SIZEOF_INT) { + // The int read has to be converted to unsigned long so the & op + i = (buf.getInt(buf.position() + offsetFromPos) & 0x00000000ffffffffL); + remaining -= Bytes.SIZEOF_INT; + offsetFromPos += Bytes.SIZEOF_INT; + } + if (remaining >= Bytes.SIZEOF_SHORT) { + short s = buf.getShort(buf.position() + offsetFromPos); + i = i << 16; + i = i | (s & 0xFFFF); + remaining -= Bytes.SIZEOF_SHORT; + offsetFromPos += Bytes.SIZEOF_SHORT; + } + for (int idx = 0; idx < remaining; idx++) { + byte b = buf.get(buf.position() + offsetFromPos + idx); + i = i << 8; + i = i | (b & 0xFF); + } + buf.position(buf.position() + len - 1); + return WritableUtils.isNegativeVInt(firstByte) ? ~i : i; + } } /** diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java index eabfed2042ca..e07e75bffdb2 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; @@ -179,6 +180,23 @@ public void testReadWriteVLong() { ByteBufferUtils.writeVLong(b, l); b.flip(); assertEquals(l, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + } + } + + @Test + public void testReadWriteConsecutiveVLong() { + for (long l : testNumbers) { + ByteBuffer b = ByteBuffer.allocate(2 * MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + ByteBufferUtils.writeVLong(b, l - 4); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(b)); + assertEquals(l - 4, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + assertEquals(l - 4, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); } } From 696f58373be38a2e8c598f202cdcd874cc6306f4 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 16 Jan 2024 16:42:52 -0500 Subject: [PATCH 212/514] HBASE-28306 Add property to customize Version information (#5621) Signed-off-by: Duo Zhang --- hbase-common/pom.xml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index fc3136e05558..dd30a7a6f581 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -31,6 +31,15 @@ Apache HBase - Common Common functionality for HBase + + + ${project.version} + + org.apache.hbase @@ -211,7 +220,7 @@ process-resources - + @@ -227,7 +236,7 @@ - + From 6017937ced115b1ad367faf9f836c3112f6b6f9d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 17 Jan 2024 11:20:14 +0800 Subject: [PATCH 213/514] HBASE-28312 The bad auth exception can not be passed to client rpc calls properly (#5629) Signed-off-by: Bryan Beaudreault --- .../hbase/ipc/BlockingRpcConnection.java 
| 76 ++++++++++--------- .../apache/hadoop/hbase/ipc/ConnectionId.java | 2 +- .../org/apache/hadoop/hbase/ipc/IPCUtil.java | 19 ++++- .../hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- .../hadoop/hbase/ipc/RpcConnection.java | 3 +- .../hadoop/hbase/ipc/DummyException.java | 27 +++++++ .../ipc/DummyFatalConnectionException.java | 27 +++++++ .../apache/hadoop/hbase/ipc/TestIPCUtil.java | 22 ++++++ .../ipc/NettyRpcServerPreambleHandler.java | 9 +++ .../hadoop/hbase/ipc/AbstractTestIPC.java | 23 ++++++ .../hbase/ipc/BadAuthNettyRpcConnection.java | 36 +++++++++ .../hadoop/hbase/ipc/TestBlockingIPC.java | 20 +++++ .../apache/hadoop/hbase/ipc/TestNettyIPC.java | 11 +++ .../{security => ipc}/TestNettyTlsIPC.java | 20 +++-- .../security/TestNettyTLSIPCFileWatcher.java | 7 +- .../hadoop/hbase/security/TestSaslTlsIPC.java | 2 +- 16 files changed, 254 insertions(+), 52 deletions(-) create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyException.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyFatalConnectionException.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/BadAuthNettyRpcConnection.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/{security => ipc}/TestNettyTlsIPC.java (94%) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index 81ad4d2f056d..f30b77c64fe9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.ipc.IPCUtil.buildRequestHeader; -import static org.apache.hadoop.hbase.ipc.IPCUtil.createRemoteException; import static org.apache.hadoop.hbase.ipc.IPCUtil.getTotalSizeWhenWrittenDelimited; -import static org.apache.hadoop.hbase.ipc.IPCUtil.isFatalConnectionException; import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled; import static org.apache.hadoop.hbase.ipc.IPCUtil.write; @@ -68,6 +66,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; @@ -657,6 +656,25 @@ private void readResponse() { // Read the header ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); int id = responseHeader.getCallId(); + if (LOG.isTraceEnabled()) { + LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) + + ", totalSize: " + totalSize + " bytes"); + } + RemoteException remoteExc; + if (responseHeader.hasException()) { + ExceptionResponse exceptionResponse = responseHeader.getException(); + remoteExc = IPCUtil.createRemoteException(exceptionResponse); + if (IPCUtil.isFatalConnectionException(exceptionResponse)) { + // Here we will cleanup all calls so do not need to fall back, just return. + synchronized (this) { + closeConn(remoteExc); + } + return; + } + } else { + remoteExc = null; + } + call = calls.remove(id); // call.done have to be set before leaving this method expectedCall = (call != null && !call.isDone()); if (!expectedCall) { @@ -667,46 +685,34 @@ private void readResponse() { // this connection. 
int readSoFar = getTotalSizeWhenWrittenDelimited(responseHeader); int whatIsLeftToRead = totalSize - readSoFar; + LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead + + " bytes"); IOUtils.skipFully(in, whatIsLeftToRead); if (call != null) { call.callStats.setResponseSizeBytes(totalSize); - call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } return; } - if (responseHeader.hasException()) { - ExceptionResponse exceptionResponse = responseHeader.getException(); - RemoteException re = createRemoteException(exceptionResponse); - call.setException(re); - call.callStats.setResponseSizeBytes(totalSize); - call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); - if (isFatalConnectionException(exceptionResponse)) { - synchronized (this) { - closeConn(re); - } - } - } else { - Message value = null; - if (call.responseDefaultType != null) { - Message.Builder builder = call.responseDefaultType.newBuilderForType(); - ProtobufUtil.mergeDelimitedFrom(builder, in); - value = builder.build(); - } - CellScanner cellBlockScanner = null; - if (responseHeader.hasCellBlockMeta()) { - int size = responseHeader.getCellBlockMeta().getLength(); - byte[] cellBlock = new byte[size]; - IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); - cellBlockScanner = this.rpcClient.cellBlockBuilder.createCellScanner(this.codec, - this.compressor, cellBlock); - } - call.setResponse(value, cellBlockScanner); - call.callStats.setResponseSizeBytes(totalSize); - call.callStats - .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); + call.callStats.setResponseSizeBytes(totalSize); + if (remoteExc != null) { + call.setException(remoteExc); + return; + } + Message value = null; + if (call.responseDefaultType != null) { + Message.Builder builder = call.responseDefaultType.newBuilderForType(); + ProtobufUtil.mergeDelimitedFrom(builder, in); + value = builder.build(); + } + CellScanner cellBlockScanner = null; + if (responseHeader.hasCellBlockMeta()) { + int size = responseHeader.getCellBlockMeta().getLength(); + byte[] cellBlock = new byte[size]; + IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); + cellBlockScanner = + this.rpcClient.cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); } + call.setResponse(value, cellBlockScanner); } catch (IOException e) { if (expectedCall) { call.setException(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index 4de82e0c12af..f0b00c459d06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -27,7 +27,7 @@ * uniquely identified by <remoteAddress, ticket, serviceName> */ @InterfaceAudience.Private -class ConnectionId { +public class ConnectionId { private static final int PRIME = 16777619; final User ticket; final String serviceName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index d6df6c974ccf..bf4b833e856c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import 
org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; @@ -62,6 +64,8 @@ @InterfaceAudience.Private class IPCUtil { + private static final Logger LOG = LoggerFactory.getLogger(IPCUtil.class); + /** * Write out header, param, and cell block if there is one. * @param dos Stream to write into @@ -159,8 +163,19 @@ static RemoteException createRemoteException(final ExceptionResponse e) { } /** Returns True if the exception is a fatal connection exception. */ - static boolean isFatalConnectionException(final ExceptionResponse e) { - return e.getExceptionClassName().equals(FatalConnectionException.class.getName()); + static boolean isFatalConnectionException(ExceptionResponse e) { + if (e.getExceptionClassName().equals(FatalConnectionException.class.getName())) { + return true; + } + // try our best to check for sub classes of FatalConnectionException + try { + return e.getExceptionClassName() != null && FatalConnectionException.class.isAssignableFrom( + Class.forName(e.getExceptionClassName(), false, IPCUtil.class.getClassLoader())); + // Class.forName may throw ExceptionInInitializerError so we have to catch Throwable here + } catch (Throwable t) { + LOG.debug("Can not get class object for {}", e.getExceptionClassName(), t); + return false; + } } static IOException toIOE(Throwable t) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 408ea347e7a3..4d8564be7a25 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -79,7 +79,7 @@ * @since 2.0.0 */ @InterfaceAudience.Private -class NettyRpcConnection extends RpcConnection { +public class NettyRpcConnection extends RpcConnection { private static final Logger LOG = LoggerFactory.getLogger(NettyRpcConnection.class); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 31698a1a1e8e..dbe6ed1648df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -145,7 +145,8 @@ public void run(Timeout timeout) throws Exception { } } - protected final byte[] getConnectionHeaderPreamble() { + // will be overridden in tests + protected byte[] getConnectionHeaderPreamble() { // Assemble the preamble up in a buffer first and then send it. Writing individual elements, // they are getting sent across piecemeal according to wireshark and then server is messing // up the reading on occasion (the passed in stream is not buffered yet). diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyException.java new file mode 100644 index 000000000000..407c1248a98d --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyException.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +/** + * Just a dummy exception for testing IPCUtil.isFatalConnectionException. + */ +public class DummyException extends Exception { + + private static final long serialVersionUID = 215191975455115118L; + +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyFatalConnectionException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyFatalConnectionException.java new file mode 100644 index 000000000000..437b60b031b6 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/DummyFatalConnectionException.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +/** + * Just a dummy exception for testing IPCUtil.isFatalConnectionException. 
+ */ +public class DummyFatalConnectionException extends FatalConnectionException { + + private static final long serialVersionUID = -1966815615846798490L; + +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 67a8d15c1d02..d0e4044b0456 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -19,6 +19,7 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -44,6 +45,8 @@ import org.apache.hbase.thirdparty.io.netty.channel.DefaultEventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; + @Category({ ClientTests.class, SmallTests.class }) public class TestIPCUtil { @@ -159,4 +162,23 @@ public void run() { eventLoop.shutdownGracefully().get(); } } + + @Test + public void testIsFatalConnectionException() { + // intentionally not reference the class object directly, so here we will not load the class, to + // make sure that in isFatalConnectionException, we can use initialized = false when calling + // Class.forName + ExceptionResponse resp = ExceptionResponse.newBuilder() + .setExceptionClassName("org.apache.hadoop.hbase.ipc.DummyFatalConnectionException").build(); + assertTrue(IPCUtil.isFatalConnectionException(resp)); + + resp = ExceptionResponse.newBuilder() + .setExceptionClassName("org.apache.hadoop.hbase.ipc.DummyException").build(); + assertFalse(IPCUtil.isFatalConnectionException(resp)); + + // class not found + resp = ExceptionResponse.newBuilder() + .setExceptionClassName("org.apache.hadoop.hbase.ipc.WhatEver").build(); + assertFalse(IPCUtil.isFatalConnectionException(resp)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index b79a67f986e8..02e1b5858117 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -38,6 +38,7 @@ class NettyRpcServerPreambleHandler extends SimpleChannelInboundHandler private final NettyRpcServer rpcServer; private final NettyServerRpcConnection conn; + private boolean processPreambleError; public NettyRpcServerPreambleHandler(NettyRpcServer rpcServer, NettyServerRpcConnection conn) { this.rpcServer = rpcServer; @@ -46,10 +47,18 @@ public NettyRpcServerPreambleHandler(NettyRpcServer rpcServer, NettyServerRpcCon @Override protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { + if (processPreambleError) { + // if we failed to process preamble, we will close the connection immediately, but it is + // possible that we have already received some bytes after the 'preamble' so when closing, the + // netty framework will still pass them here. So we set a flag here to just skip processing + // these broken messages. 
+ return; + } ByteBuffer buf = ByteBuffer.allocate(msg.readableBytes()); msg.readBytes(buf); buf.flip(); if (!conn.processPreamble(buf)) { + processPreambleError = true; conn.close(); return; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index fe947d33110d..a93f54d4d9d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -29,9 +29,11 @@ import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.newStub; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -53,6 +55,7 @@ import java.net.InetSocketAddress; import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -545,4 +548,24 @@ public void testTracingErrorIpc() throws IOException { hasTraceId(traceRule.getSpans().iterator().next().getTraceId())))); } } + + protected abstract AbstractRpcClient createBadAuthRpcClient(Configuration conf); + + @Test + public void testBadPreambleHeader() throws IOException, ServiceException { + Configuration clientConf = new Configuration(CONF); + RpcServer rpcServer = createRpcServer("testRpcServer", Collections.emptyList(), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); + try (AbstractRpcClient client = createBadAuthRpcClient(clientConf)) { + rpcServer.start(); + BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); + ServiceException se = assertThrows(ServiceException.class, + () -> stub.echo(null, EchoRequestProto.newBuilder().setMessage("hello").build())); + IOException ioe = ProtobufUtil.handleRemoteException(se); + assertThat(ioe, instanceOf(BadAuthException.class)); + assertThat(ioe.getMessage(), containsString("authName=unknown")); + } finally { + rpcServer.stop(); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/BadAuthNettyRpcConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/BadAuthNettyRpcConnection.java new file mode 100644 index 000000000000..63554421dfb6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/BadAuthNettyRpcConnection.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import java.io.IOException; + +public class BadAuthNettyRpcConnection extends NettyRpcConnection { + + public BadAuthNettyRpcConnection(NettyRpcClient rpcClient, ConnectionId remoteId) + throws IOException { + super(rpcClient, remoteId); + } + + @Override + protected byte[] getConnectionHeaderPreamble() { + byte[] header = super.getConnectionHeaderPreamble(); + // set an invalid auth code + header[header.length - 1] = -10; + return header; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java index 4d7d0996fabd..9544e8c35458 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java @@ -107,4 +107,24 @@ protected RpcServer createTestFailingRpcServer(String name, Configuration conf, RpcScheduler scheduler) throws IOException { return new TestFailingRpcServer(null, name, services, bindAddress, conf, scheduler); } + + @Override + protected AbstractRpcClient createBadAuthRpcClient(Configuration conf) { + return new BlockingRpcClient(conf) { + + @Override + protected BlockingRpcConnection createConnection(ConnectionId remoteId) throws IOException { + return new BlockingRpcConnection(this, remoteId) { + @Override + protected byte[] getConnectionHeaderPreamble() { + byte[] header = super.getConnectionHeaderPreamble(); + // set an invalid auth code + header[header.length - 1] = -10; + return header; + } + }; + } + + }; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java index 265ae7852f02..6feab5f2cac8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java @@ -146,4 +146,15 @@ protected RpcServer createTestFailingRpcServer(String name, Configuration conf, RpcScheduler scheduler) throws IOException { return new FailingNettyRpcServer(null, name, services, bindAddress, conf, scheduler); } + + @Override + protected AbstractRpcClient createBadAuthRpcClient(Configuration conf) { + return new NettyRpcClient(conf) { + + @Override + protected NettyRpcConnection createConnection(ConnectionId remoteId) throws IOException { + return new BadAuthNettyRpcConnection(this, remoteId); + } + }; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTlsIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java similarity index 94% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTlsIPC.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java index f21e3b93bef5..4c654123e130 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTlsIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase.security; +package org.apache.hadoop.hbase.ipc; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -37,13 +37,6 @@ import org.apache.hadoop.hbase.io.crypto.tls.X509TestContext; import org.apache.hadoop.hbase.io.crypto.tls.X509TestContextProvider; import org.apache.hadoop.hbase.io.crypto.tls.X509Util; -import org.apache.hadoop.hbase.ipc.AbstractRpcClient; -import org.apache.hadoop.hbase.ipc.AbstractTestIPC; -import org.apache.hadoop.hbase.ipc.FailingNettyRpcServer; -import org.apache.hadoop.hbase.ipc.NettyRpcClient; -import org.apache.hadoop.hbase.ipc.NettyRpcServer; -import org.apache.hadoop.hbase.ipc.RpcScheduler; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; @@ -193,4 +186,15 @@ protected RpcServer createTestFailingRpcServer(String name, RpcScheduler scheduler) throws IOException { return new FailingNettyRpcServer(SERVER, name, services, bindAddress, conf, scheduler); } + + @Override + protected AbstractRpcClient createBadAuthRpcClient(Configuration conf) { + return new NettyRpcClient(conf) { + + @Override + protected NettyRpcConnection createConnection(ConnectionId remoteId) throws IOException { + return new BadAuthNettyRpcConnection(this, remoteId); + } + }; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTLSIPCFileWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTLSIPCFileWatcher.java index 72fc7141680a..403a538f024b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTLSIPCFileWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTLSIPCFileWatcher.java @@ -109,14 +109,15 @@ public static List data() { @BeforeClass public static void setUpBeforeClass() throws IOException { Security.addProvider(new BouncyCastleProvider()); - File dir = new File(UTIL.getDataTestDir(TestNettyTlsIPC.class.getSimpleName()).toString()) - .getCanonicalFile(); + File dir = + new File(UTIL.getDataTestDir(TestNettyTLSIPCFileWatcher.class.getSimpleName()).toString()) + .getCanonicalFile(); FileUtils.forceMkdir(dir); // server must enable tls CONF.setBoolean(X509Util.HBASE_SERVER_NETTY_TLS_ENABLED, true); PROVIDER = new X509TestContextProvider(CONF, dir); EVENT_LOOP_GROUP_CONFIG = - NettyEventLoopGroupConfig.setup(CONF, TestNettyTlsIPC.class.getSimpleName()); + NettyEventLoopGroupConfig.setup(CONF, TestNettyTLSIPCFileWatcher.class.getSimpleName()); SERVER = mock(HBaseServerBase.class); when(SERVER.getEventLoopGroupConfig()).thenReturn(EVENT_LOOP_GROUP_CONFIG); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPC.java index 1477e8aa0fca..1120d56fb9fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPC.java @@ -97,7 +97,7 @@ public static List data() { @BeforeClass public static void setUpBeforeClass() throws Exception { Security.addProvider(new BouncyCastleProvider()); - File dir = new File(TEST_UTIL.getDataTestDir(TestNettyTlsIPC.class.getSimpleName()).toString()) + File dir = new File(TEST_UTIL.getDataTestDir(TestSaslTlsIPC.class.getSimpleName()).toString()) .getCanonicalFile(); 
FileUtils.forceMkdir(dir); initKDCAndConf(); From 6b0ce08c82cfe2e7d34d6b62578e71aa2ef8b5f5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 17 Jan 2024 13:36:06 +0800 Subject: [PATCH 214/514] HBASE-28312 Addendum NettyRpcConnection and ConnectionId do not need to be public --- .../src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java | 2 +- .../java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index f0b00c459d06..4de82e0c12af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -27,7 +27,7 @@ * uniquely identified by <remoteAddress, ticket, serviceName> */ @InterfaceAudience.Private -public class ConnectionId { +class ConnectionId { private static final int PRIME = 16777619; final User ticket; final String serviceName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 4d8564be7a25..408ea347e7a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -79,7 +79,7 @@ * @since 2.0.0 */ @InterfaceAudience.Private -public class NettyRpcConnection extends RpcConnection { +class NettyRpcConnection extends RpcConnection { private static final Logger LOG = LoggerFactory.getLogger(NettyRpcConnection.class); From c001ed3ba3c25a0d1a684c393092756b7ad9ee25 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 18 Jan 2024 16:59:01 +0800 Subject: [PATCH 215/514] HBASE-28316 Add BootstrapNodeService handlers (#5637) Signed-off-by: Bryan Beaudreault --- .../client/RegistryEndpointsRefresher.java | 2 +- .../hadoop/hbase/security/SecurityInfo.java | 6 + .../apache/hadoop/hbase/ipc/RpcServer.java | 7 ++ .../hbase/regionserver/RSRpcServices.java | 13 ++- .../hbase/security/HBasePolicyProvider.java | 7 +- .../hbase/client/TestBootstrapNodeUpdate.java | 103 ++++++++++++++++++ ...curityInfoAndHBasePolicyProviderMatch.java | 82 ++++++++++++++ 7 files changed, 216 insertions(+), 4 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityInfoAndHBasePolicyProviderMatch.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java index ac7cad275813..cdb2dd92b4fc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryEndpointsRefresher.java @@ -154,7 +154,7 @@ static RegistryEndpointsRefresher create(Configuration conf, String initialDelay TimeUnit.SECONDS.toMillis(conf.getLong(initialDelaySecsConfigName, periodicRefreshMs / 10))); long minTimeBetweenRefreshesMs = TimeUnit.SECONDS .toMillis(conf.getLong(minIntervalSecsConfigName, MIN_SECS_BETWEEN_REFRESHES_DEFAULT)); - Preconditions.checkArgument(minTimeBetweenRefreshesMs < periodicRefreshMs); + Preconditions.checkArgument(minTimeBetweenRefreshesMs <= periodicRefreshMs); return 
new RegistryEndpointsRefresher(initialDelayMs, periodicRefreshMs, minTimeBetweenRefreshesMs, refresher); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index dbb4c83844a4..2e16d5646953 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -23,7 +23,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; +import org.apache.hadoop.hbase.shaded.protobuf.generated.BootstrapNodeProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; @@ -50,6 +52,10 @@ public class SecurityInfo { new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + infos.put(BootstrapNodeProtos.BootstrapNodeService.getDescriptor().getName(), + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + infos.put(LockServiceProtos.LockService.getDescriptor().getName(), + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); // NOTE: IF ADDING A NEW SERVICE, BE SURE TO UPDATE HBasePolicyProvider ALSO ELSE // new Service will not be found when all is Kerberized!!!! } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 6b4bf28bc959..d3ec4ff8c73e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; +import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -881,4 +882,10 @@ public void setNamedQueueRecorder(NamedQueueRecorder namedQueueRecorder) { protected boolean needAuthorization() { return authorize; } + + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public List getServices() { + return services; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a43fac6993e6..0fe6f6476a6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -336,8 +336,8 @@ public class RSRpcServices extends HBaseRpcServicesBase /** * Services launched in RSRpcServices. By default they are on but you can use the below booleans - * to selectively enable/disable either Admin or Client Service (Rare is the case where you would - * ever turn off one or the other). 
+ * to selectively enable/disable these services (Rare is the case where you would ever turn off + * one or the other). */ public static final String REGIONSERVER_ADMIN_SERVICE_CONFIG = "hbase.regionserver.admin.executorService"; @@ -345,6 +345,8 @@ public class RSRpcServices extends HBaseRpcServicesBase "hbase.regionserver.client.executorService"; public static final String REGIONSERVER_CLIENT_META_SERVICE_CONFIG = "hbase.regionserver.client.meta.executorService"; + public static final String REGIONSERVER_BOOTSTRAP_NODES_SERVICE_CONFIG = + "hbase.regionserver.bootstrap.nodes.executorService"; /** * An Rpc callback for closing a RegionScanner. @@ -1449,6 +1451,8 @@ protected List getServices() { boolean client = getConfiguration().getBoolean(REGIONSERVER_CLIENT_SERVICE_CONFIG, true); boolean clientMeta = getConfiguration().getBoolean(REGIONSERVER_CLIENT_META_SERVICE_CONFIG, true); + boolean bootstrapNodes = + getConfiguration().getBoolean(REGIONSERVER_BOOTSTRAP_NODES_SERVICE_CONFIG, true); List bssi = new ArrayList<>(); if (client) { bssi.add(new BlockingServiceAndInterface(ClientService.newReflectiveBlockingService(this), @@ -1462,6 +1466,11 @@ protected List getServices() { bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this), ClientMetaService.BlockingInterface.class)); } + if (bootstrapNodes) { + bssi.add( + new BlockingServiceAndInterface(BootstrapNodeService.newReflectiveBlockingService(this), + BootstrapNodeService.BlockingInterface.class)); + } return new ImmutableList.Builder().addAll(bssi).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java index 91323a722150..30578a91909b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java @@ -25,7 +25,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.BootstrapNodeProtos.BootstrapNodeService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; @@ -44,8 +46,11 @@ public class HBasePolicyProvider extends PolicyProvider { new Service("security.client.protocol.acl", RegistryProtos.ClientMetaService.BlockingInterface.class), new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class), + new Service("security.admin.protocol.acl", LockService.BlockingInterface.class), new Service("security.masterregion.protocol.acl", - RegionServerStatusService.BlockingInterface.class) }; + RegionServerStatusService.BlockingInterface.class), + new Service("security.regionserver.protocol.acl", + BootstrapNodeService.BlockingInterface.class) }; @Override public Service[] getServices() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java new file mode 100644 index 
000000000000..d5b0ee18e594 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.util.Set; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseRpcServicesBase; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + +/** + * Make sure that we can update the bootstrap server from master to region server, and region server + * could also contact each other to update the bootstrap nodes. 
+ */ +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestBootstrapNodeUpdate { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBootstrapNodeUpdate.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static RpcConnectionRegistry REGISTRY; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.setLong(BootstrapNodeManager.REQUEST_MASTER_INTERVAL_SECS, 5); + conf.setLong(BootstrapNodeManager.REQUEST_MASTER_MIN_INTERVAL_SECS, 1); + conf.setLong(BootstrapNodeManager.REQUEST_REGIONSERVER_INTERVAL_SECS, 1); + conf.setInt(HBaseRpcServicesBase.CLIENT_BOOTSTRAP_NODE_LIMIT, 2); + conf.setLong(RpcConnectionRegistry.INITIAL_REFRESH_DELAY_SECS, 5); + conf.setLong(RpcConnectionRegistry.PERIODIC_REFRESH_INTERVAL_SECS, 1); + conf.setLong(RpcConnectionRegistry.MIN_SECS_BETWEEN_REFRESHES, 1); + UTIL.startMiniCluster(3); + REGISTRY = new RpcConnectionRegistry(conf); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + Closeables.close(REGISTRY, true); + UTIL.shutdownMiniCluster(); + } + + @Test + public void testUpdate() throws Exception { + ServerName activeMasterServerName = REGISTRY.getActiveMaster().get(); + ServerName masterInConf = ServerName.valueOf(activeMasterServerName.getHostname(), + activeMasterServerName.getPort(), -1); + // we should have master in the beginning + assertThat(REGISTRY.getParsedServers(), hasItem(masterInConf)); + // and after refreshing, we will switch to use region servers + UTIL.waitFor(15000, () -> !REGISTRY.getParsedServers().contains(masterInConf) + && !REGISTRY.getParsedServers().contains(activeMasterServerName)); + Set parsedServers = REGISTRY.getParsedServers(); + assertEquals(2, parsedServers.size()); + // now kill one region server + ServerName serverToKill = parsedServers.iterator().next(); + UTIL.getMiniHBaseCluster().killRegionServer(serverToKill); + // wait until the region server disappears + // since the min node limit is 2, this means region server will still contact each other for + // getting bootstrap nodes, instead of requesting master directly, so this assert can make sure + // that the getAllBootstrapNodes works fine, and also the client can communicate with region + // server to update bootstrap nodes + UTIL.waitFor(30000, () -> !REGISTRY.getParsedServers().contains(serverToKill)); + // should still have 2 servers, the remaining 2 live region servers + assertEquals(2, parsedServers.size()); + // make sure the registry still works fine + assertNotNull(REGISTRY.getClusterId().get()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityInfoAndHBasePolicyProviderMatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityInfoAndHBasePolicyProviderMatch.java new file mode 100644 index 000000000000..4a664d4d4d0e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityInfoAndHBasePolicyProviderMatch.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; +import static org.junit.Assert.assertNotNull; + +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.security.authorize.Service; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Make sure that all rpc services for master and region server are properly configured in + * {@link SecurityInfo} and {@link HBasePolicyProvider}. + */ +@Category({ SecurityTests.class, SmallTests.class }) +public class TestSecurityInfoAndHBasePolicyProviderMatch { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSecurityInfoAndHBasePolicyProviderMatch.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + UTIL.startMiniCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + UTIL.shutdownMiniCluster(); + } + + private void assertServiceMatches(RpcServerInterface rpcServer) { + HBasePolicyProvider provider = new HBasePolicyProvider(); + Set> serviceClasses = + Stream.of(provider.getServices()).map(Service::getProtocol).collect(Collectors.toSet()); + for (BlockingServiceAndInterface bsai : ((RpcServer) rpcServer).getServices()) { + assertNotNull( + "no security info for " + bsai.getBlockingService().getDescriptorForType().getName(), + SecurityInfo.getInfo(bsai.getBlockingService().getDescriptorForType().getName())); + assertThat(serviceClasses, hasItem(bsai.getServiceInterface())); + } + } + + @Test + public void testMatches() { + assertServiceMatches( + UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().getRpcServer()); + assertServiceMatches(UTIL.getMiniHBaseCluster().getRegionServer(0).getRpcServer()); + } +} From fdde2273006dc3b227d82b297b548885bb9cb48a Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 18 Jan 2024 07:41:25 -0500 Subject: [PATCH 216/514] HBASE-28319 Expose DelegatingRpcScheduler as IA.LimitedPrivate (#5638) Signed-off-by: Duo Zhang --- .../hadoop/hbase/ipc/DelegatingRpcScheduler.java | 11 +++++++++++ 1 file changed, 11 insertions(+) rename hbase-server/src/{test => main}/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java (86%) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java similarity index 
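HBASE-28319 above moves DelegatingRpcScheduler into src/main and opens it up to coprocessor and Phoenix code, so a custom scheduler can extend it rather than implement RpcScheduler from scratch. A minimal sketch of such a subclass, assuming the base class keeps a constructor that accepts the scheduler it wraps (the protected delegate field in the diff below suggests this); the package and class names here are hypothetical:

package org.example.hbase.ipc; // hypothetical package

import org.apache.hadoop.hbase.ipc.DelegatingRpcScheduler;
import org.apache.hadoop.hbase.ipc.RpcScheduler;

// Overrides nothing yet: every RpcScheduler method falls through to the wrapped
// scheduler, so future interface additions only need changes in the delegating
// base class, which is the compatibility argument behind exposing it.
public class PassThroughRpcScheduler extends DelegatingRpcScheduler {
  public PassThroughRpcScheduler(RpcScheduler delegate) {
    super(delegate);
  }
}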
86% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java index f8ac4a1bb9a1..52bd43cb7f62 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +/** + * Users of the hbase.region.server.rpc.scheduler.factory.class customization config can return an + * implementation which extends this class in order to minimize impact of breaking interface + * changes. + */ +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) +@InterfaceStability.Evolving public class DelegatingRpcScheduler extends RpcScheduler { protected RpcScheduler delegate; From 2f6b6aaaaad6f036a5604378cfa1b84d4a245fca Mon Sep 17 00:00:00 2001 From: Xin Sun Date: Mon, 22 Jan 2024 14:02:11 +0800 Subject: [PATCH 217/514] HBASE-28324 TestRegionNormalizerWorkQueue#testTake is flaky (#5643) Signed-off-by: Duo Zhang --- .../normalizer/TestRegionNormalizerWorkQueue.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java index c6d14c191145..088df7e7376e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java @@ -22,7 +22,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.ArrayList; @@ -41,6 +40,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -186,6 +187,7 @@ public void testTake() throws Exception { final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); final ConcurrentLinkedQueue takeTimes = new ConcurrentLinkedQueue<>(); final AtomicBoolean finished = new AtomicBoolean(false); + final int count = 5; final Runnable consumer = () -> { try { while (!finished.get()) { @@ -199,11 +201,12 @@ public void testTake() throws Exception { CompletableFuture worker = CompletableFuture.runAsync(consumer); final long testStart = System.nanoTime(); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < count; i++) { Thread.sleep(10); queue.put(i); } - + // should have timing information for 5 calls to take. + Waiter.waitFor(HBaseConfiguration.create(), 1000, () -> count == takeTimes.size()); // set finished = true and pipe one more value in case the thread needs an extra pass through // the loop. 
finished.set(true); @@ -211,9 +214,7 @@ public void testTake() throws Exception { worker.get(1, TimeUnit.SECONDS); final Iterator times = takeTimes.iterator(); - assertTrue("should have timing information for at least 2 calls to take.", - takeTimes.size() >= 5); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < count; i++) { assertThat( "Observations collected in takeTimes should increase by roughly 10ms every interval", times.next(), greaterThan(testStart + TimeUnit.MILLISECONDS.toNanos(i * 10))); From 877830e06c875f9ae1e0d84104c6733cf935babd Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 22 Jan 2024 17:18:06 -0900 Subject: [PATCH 218/514] HBASE-28271 Infinite waiting on lock acquisition by snapshot can result in unresponsive master (#5603) Signed-off-by: Hui Ruan Signed-off-by: Duo Zhang --- .../hbase/master/locking/LockManager.java | 9 - .../master/snapshot/TakeSnapshotHandler.java | 38 ++++- .../TestSnapshotProcedureWithLockTimeout.java | 159 ++++++++++++++++++ 3 files changed, 190 insertions(+), 16 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureWithLockTimeout.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java index 7fab616449c2..eae9c7dcc1b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java @@ -108,15 +108,6 @@ public MasterLock(final RegionInfo[] regionInfos, final String description) { this.description = description; } - /** - * Acquire the lock, waiting indefinitely until the lock is released or the thread is - * interrupted. - * @throws InterruptedException If current thread is interrupted while waiting for the lock - */ - public boolean acquire() throws InterruptedException { - return tryAcquire(0); - } - /** * Acquire the lock within a wait time. 
* @param timeoutMs The maximum time (in milliseconds) to wait for the lock, 0 to wait diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 58ecaca09ec6..f746adf0b89e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hbase.master.snapshot; +import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT; +import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY; + import java.io.IOException; import java.util.List; import java.util.concurrent.CancellationException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -65,6 +69,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements SnapshotSentinel, ForeignExceptionSnare { private static final Logger LOG = LoggerFactory.getLogger(TakeSnapshotHandler.class); + public static final String HBASE_SNAPSHOT_MASTER_LOCK_ACQUIRE_TIMEOUT = + "hbase.snapshot.master.lock.acquire.timeout"; private volatile boolean finished; @@ -85,6 +91,13 @@ public abstract class TakeSnapshotHandler extends EventHandler protected final TableName snapshotTable; protected final SnapshotManifest snapshotManifest; protected final SnapshotManager snapshotManager; + /** + * Snapshot creation requires table lock. If any region of the table is in transition, table lock + * cannot be acquired by LockProcedure and hence snapshot creation could hang for potentially very + * long time. This timeout will ensure snapshot creation fails-fast by waiting for only given + * timeout. 
+ */ + private final long lockAcquireTimeoutMs; protected TableDescriptor htd; @@ -129,6 +142,8 @@ public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices ma "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable, false, true); this.snapshotManifest = SnapshotManifest.create(conf, rootFs, workingDir, snapshot, monitor, status); + this.lockAcquireTimeoutMs = conf.getLong(HBASE_SNAPSHOT_MASTER_LOCK_ACQUIRE_TIMEOUT, + conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)); } private TableDescriptor loadTableDescriptor() throws IOException { @@ -147,12 +162,16 @@ private TableDescriptor loadTableDescriptor() throws IOException { public TakeSnapshotHandler prepare() throws Exception { super.prepare(); // after this, you should ensure to release this lock in case of exceptions - this.tableLock.acquire(); - try { - this.htd = loadTableDescriptor(); // check that .tableinfo is present - } catch (Exception e) { - this.tableLock.release(); - throw e; + if (this.tableLock.tryAcquire(this.lockAcquireTimeoutMs)) { + try { + this.htd = loadTableDescriptor(); // check that .tableinfo is present + } catch (Exception e) { + this.tableLock.release(); + throw e; + } + } else { + LOG.error("Master lock could not be acquired in {} ms", lockAcquireTimeoutMs); + throw new DoNotRetryIOException("Master lock could not be acquired"); } return this; } @@ -176,7 +195,12 @@ public void process() { tableLockToRelease = master.getLockManager().createMasterLock(snapshotTable, LockType.SHARED, this.getClass().getName() + ": take snapshot " + snapshot.getName()); tableLock.release(); - tableLockToRelease.acquire(); + boolean isTableLockAcquired = tableLockToRelease.tryAcquire(this.lockAcquireTimeoutMs); + if (!isTableLockAcquired) { + LOG.error("Could not acquire shared lock on table {} in {} ms", snapshotTable, + lockAcquireTimeoutMs); + throw new IOException("Could not acquire shared lock on table " + snapshotTable); + } } // If regions move after this meta scan, the region specific snapshot should fail, triggering // an external exception that gets captured here. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureWithLockTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureWithLockTimeout.java new file mode 100644 index 000000000000..f780d68acf49 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureWithLockTimeout.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
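With prepare() and process() above now using tryAcquire instead of waiting indefinitely, the time a snapshot spends waiting for the table lock is bounded. A minimal sketch of tuning that bound, assuming you are assembling the master Configuration yourself; the key string matches HBASE_SNAPSHOT_MASTER_LOCK_ACQUIRE_TIMEOUT defined above, and the 60-second value is only illustrative (when unset, the handler falls back to hbase.rpc.timeout, as its constructor shows):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SnapshotLockTimeoutConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Same key as TakeSnapshotHandler.HBASE_SNAPSHOT_MASTER_LOCK_ACQUIRE_TIMEOUT.
    conf.setLong("hbase.snapshot.master.lock.acquire.timeout", 60_000L);
    // Read it back the same way the handler does, with a -1 sentinel as the default here.
    System.out.println(conf.getLong("hbase.snapshot.master.lock.acquire.timeout", -1L));
  }
}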
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.fail; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.RegionSplitter; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; + +/** + * Snapshot creation with master lock timeout test. + */ +@Category({ MasterTests.class, MediumTests.class }) +public class TestSnapshotProcedureWithLockTimeout { + + private static final Logger LOG = + LoggerFactory.getLogger(TestSnapshotProcedureWithLockTimeout.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSnapshotProcedureWithLockTimeout.class); + + private static HBaseTestingUtil TEST_UTIL; + private HMaster master; + private TableName TABLE_NAME; + private byte[] CF; + private String SNAPSHOT_NAME; + + @Before + public void setup() throws Exception { + TEST_UTIL = new HBaseTestingUtil(); + Configuration config = TEST_UTIL.getConfiguration(); + config.setInt("hbase.snapshot.remote.verify.threshold", 1); + config.setLong(TakeSnapshotHandler.HBASE_SNAPSHOT_MASTER_LOCK_ACQUIRE_TIMEOUT, 1L); + TEST_UTIL.startMiniCluster(3); + master = TEST_UTIL.getHBaseCluster().getMaster(); + TABLE_NAME = TableName.valueOf(Bytes.toBytes("TestSnapshotProcedureWithLockTimeout")); + CF = Bytes.toBytes("cf"); + SNAPSHOT_NAME = "SnapshotProcLockTimeout"; + final byte[][] splitKeys = new RegionSplitter.HexStringSplit().split(10); + Table table = TEST_UTIL.createTable(TABLE_NAME, CF, splitKeys); + TEST_UTIL.loadTable(table, CF, false); + } + + @After + public void teardown() throws Exception { + if (this.master != null) { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), + false); + } + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testTakeZkCoordinatedSnapshot() { + for (int i = 0; i < 10; i++) { + try { + // Verify that snapshot creation is not possible because lock could not be + // acquired on time. This can be flaky behavior because even though we provide 1ms + // as lock timeout, it could still be fast enough and eventually lead to successful + // snapshot creation. If that happens, retry again. + testTakeZkCoordinatedSnapshot(i); + break; + } catch (Exception e) { + LOG.error("Error because of faster lock acquisition. 
retrying....", e); + } + assertNotEquals("Retries exhausted", 9, i); + } + } + + private void testTakeZkCoordinatedSnapshot(int i) throws Exception { + SnapshotDescription snapshotOnSameTable = + new SnapshotDescription(SNAPSHOT_NAME + i, TABLE_NAME, SnapshotType.SKIPFLUSH); + SnapshotProtos.SnapshotDescription snapshotOnSameTableProto = + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotOnSameTable); + Thread second = new Thread("zk-snapshot") { + @Override + public void run() { + try { + master.getSnapshotManager().takeSnapshot(snapshotOnSameTableProto); + } catch (IOException e) { + LOG.error("zk snapshot failed", e); + fail("zk snapshot failed"); + } + } + }; + second.start(); + + Thread.sleep(5000); + boolean snapshotCreated = false; + try { + SnapshotTestingUtils.confirmSnapshotValid(TEST_UTIL, snapshotOnSameTableProto, TABLE_NAME, + CF); + snapshotCreated = true; + } catch (AssertionError e) { + LOG.error("Assertion error..", e); + if ( + e.getMessage() != null && e.getMessage().contains("target snapshot directory") + && e.getMessage().contains("doesn't exist.") + ) { + LOG.debug("Expected behaviour - snapshot could not be created"); + } else { + throw new IOException(e); + } + } + + if (snapshotCreated) { + throw new IOException("Snapshot created successfully"); + } + + // ensure all scheduled procedures are successfully completed + TEST_UTIL.waitFor(4000, 400, + () -> master.getMasterProcedureExecutor().getProcedures().stream() + .filter(masterProcedureEnvProcedure -> masterProcedureEnvProcedure.getState() + == ProcedureProtos.ProcedureState.SUCCESS) + .count() == master.getMasterProcedureExecutor().getProcedures().size()); + } +} From b8cea19d4a65e625f497eb6082de5cfcd23144c8 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 23 Jan 2024 14:21:29 +0100 Subject: [PATCH 219/514] HBASE-28325 Enable infra automation to comment on a Jira when a new PR is posted (#5645) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang --- .asf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index a991e8528d78..3b7cf932e3b6 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -38,4 +38,4 @@ notifications: commits: commits@hbase.apache.org issues: issues@hbase.apache.org pullrequests: issues@hbase.apache.org - jira_options: link + jira_options: link comment From 3a91c5b6981023a11b272f76a502891c3728f9b2 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 23 Jan 2024 11:05:11 -0500 Subject: [PATCH 220/514] HBASE-28302 Add tracking of fs read times in ScanMetrics and slow logs (#5622) Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/client/OnlineLogRecord.java | 29 +++-- .../client/metrics/ServerSideScanMetrics.java | 4 + .../hbase/shaded/protobuf/ProtobufUtil.java | 1 + .../hbase/client/TestOnlineLogRecord.java | 24 ++-- .../hadoop/hbase/io/MetricsIOSource.java | 5 + .../hadoop/hbase/io/MetricsIOSourceImpl.java | 8 ++ .../protobuf/server/region/TooSlowLog.proto | 2 + .../org/apache/hadoop/hbase/io/MetricsIO.java | 4 + .../apache/hadoop/hbase/io/hfile/HFile.java | 7 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 5 +- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 4 + .../apache/hadoop/hbase/ipc/RpcServer.java | 14 ++- .../apache/hadoop/hbase/ipc/ServerCall.java | 11 ++ .../hbase/namequeues/RpcLogDetails.java | 9 +- .../namequeues/impl/SlowLogQueueService.java | 4 +- .../hbase/regionserver/RSRpcServices.java | 3 + ...stServerSideScanMetricsFromClientSide.java | 32 ++++- .../namequeues/TestNamedQueueRecorder.java | 18 ++- 
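HBASE-28302 above threads filesystem read time through the RPC layer into both the online slow logs and the scan metrics. A minimal sketch of pulling the new value out of the slow-log records on the region servers, assuming the Admin.getLogEntries signature from recent HBase releases and an existing Admin handle; the limit of 100 and the empty filter map are illustrative, and getFsReadTime is the accessor added to OnlineLogRecord below:

import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.LogEntry;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.ServerType;

public class SlowLogFsReadTime {
  // Prints the fs read time recorded for each slow RPC reported by the region servers.
  static void printSlowLogFsReadTimes(Admin admin) throws IOException {
    Set<ServerName> servers = new HashSet<>(admin.getRegionServers());
    List<LogEntry> entries = admin.getLogEntries(servers, "SLOW_LOG",
      ServerType.REGION_SERVER, 100, Collections.emptyMap());
    for (LogEntry entry : entries) {
      OnlineLogRecord record = (OnlineLogRecord) entry;
      System.out.println(record.getMethodName() + " fsReadTime(ms)=" + record.getFsReadTime());
    }
  }
}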
.../hbase/namequeues/TestRpcLogDetails.java | 12 +- .../hbase/namequeues/TestTooLargeLog.java | 2 +- .../region/TestRegionProcedureStore.java | 10 ++ .../hadoop/hbase/thrift2/ThriftUtilities.java | 1 + .../thrift2/generated/TOnlineLogRecord.java | 111 +++++++++++++++++- .../apache/hadoop/hbase/thrift2/hbase.thrift | 1 + 24 files changed, 275 insertions(+), 46 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index d9fd51e80a95..26979129cf18 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -81,6 +81,7 @@ final public class OnlineLogRecord extends LogEntry { private final int queueTime; private final long responseSize; private final long blockBytesScanned; + private final long fsReadTime; private final String clientAddress; private final String serverClass; private final String methodName; @@ -120,6 +121,10 @@ public long getBlockBytesScanned() { return blockBytesScanned; } + public long getFsReadTime() { + return fsReadTime; + } + public String getClientAddress() { return clientAddress; } @@ -178,16 +183,18 @@ public Map getConnectionAttributes() { } OnlineLogRecord(final long startTime, final int processingTime, final int queueTime, - final long responseSize, final long blockBytesScanned, final String clientAddress, - final String serverClass, final String methodName, final String callDetails, final String param, - final String regionName, final String userName, final int multiGetsCount, - final int multiMutationsCount, final int multiServiceCalls, final Scan scan, - final Map requestAttributes, final Map connectionAttributes) { + final long responseSize, final long blockBytesScanned, final long fsReadTime, + final String clientAddress, final String serverClass, final String methodName, + final String callDetails, final String param, final String regionName, final String userName, + final int multiGetsCount, final int multiMutationsCount, final int multiServiceCalls, + final Scan scan, final Map requestAttributes, + final Map connectionAttributes) { this.startTime = startTime; this.processingTime = processingTime; this.queueTime = queueTime; this.responseSize = responseSize; this.blockBytesScanned = blockBytesScanned; + this.fsReadTime = fsReadTime; this.clientAddress = clientAddress; this.serverClass = serverClass; this.methodName = methodName; @@ -209,6 +216,7 @@ public static class OnlineLogRecordBuilder { private int queueTime; private long responseSize; private long blockBytesScanned; + private long fsReadTime; private String clientAddress; private String serverClass; private String methodName; @@ -251,6 +259,11 @@ public OnlineLogRecordBuilder setBlockBytesScanned(long blockBytesScanned) { return this; } + public OnlineLogRecordBuilder setFsReadTime(long fsReadTime) { + this.fsReadTime = fsReadTime; + return this; + } + public OnlineLogRecordBuilder setClientAddress(String clientAddress) { this.clientAddress = clientAddress; return this; @@ -319,9 +332,9 @@ public OnlineLogRecordBuilder setRequestAttributes(Map requestAt public OnlineLogRecord build() { return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, - blockBytesScanned, clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls, scan, requestAttributes, - 
connectionAttributes); + blockBytesScanned, fsReadTime, clientAddress, serverClass, methodName, callDetails, param, + regionName, userName, multiGetsCount, multiMutationsCount, multiServiceCalls, scan, + requestAttributes, connectionAttributes); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java index cf730501be0a..ff83584ccb44 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java @@ -50,6 +50,8 @@ protected AtomicLong createCounter(String counterName) { public static final String BLOCK_BYTES_SCANNED_KEY_METRIC_NAME = "BLOCK_BYTES_SCANNED"; + public static final String FS_READ_TIME_METRIC_NAME = "FS_READ_TIME"; + /** * number of rows filtered during scan RPC */ @@ -65,6 +67,8 @@ protected AtomicLong createCounter(String counterName) { public final AtomicLong countOfBlockBytesScanned = createCounter(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME); + public final AtomicLong fsReadTime = createCounter(FS_READ_TIME_METRIC_NAME); + public void setCounter(String counterName, long value) { AtomicLong c = this.counters.get(counterName); if (c != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 598ad932e679..e50b54e8eb02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -3462,6 +3462,7 @@ private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLog .setQueueTime(slowLogPayload.getQueueTime()).setRegionName(slowLogPayload.getRegionName()) .setResponseSize(slowLogPayload.getResponseSize()) .setBlockBytesScanned(slowLogPayload.getBlockBytesScanned()) + .setFsReadTime(slowLogPayload.getFsReadTime()) .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) .setUserName(slowLogPayload.getUserName()) .setRequestAttributes(convertNameBytesPairsToMap(slowLogPayload.getRequestAttributeList())) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java index fe753973ae20..a16993d56591 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java @@ -46,15 +46,15 @@ public void itSerializesScan() { scan.withStopRow(Bytes.toBytes(456)); String expectedOutput = "{\n" + " \"startTime\": 1,\n" + " \"processingTime\": 2,\n" + " \"queueTime\": 3,\n" + " \"responseSize\": 4,\n" + " \"blockBytesScanned\": 5,\n" - + " \"multiGetsCount\": 6,\n" + " \"multiMutationsCount\": 7,\n" + " \"scan\": {\n" - + " \"startRow\": \"\\\\x00\\\\x00\\\\x00{\",\n" + + " \"fsReadTime\": 6,\n" + " \"multiGetsCount\": 6,\n" + " \"multiMutationsCount\": 7,\n" + + " \"scan\": {\n" + " \"startRow\": \"\\\\x00\\\\x00\\\\x00{\",\n" + " \"stopRow\": \"\\\\x00\\\\x00\\\\x01\\\\xC8\",\n" + " \"batch\": -1,\n" + " \"cacheBlocks\": true,\n" + " \"totalColumns\": 0,\n" + " \"maxResultSize\": -1,\n" + " \"families\": {},\n" + " \"caching\": -1,\n" + " \"maxVersions\": 1,\n" + " 
\"timeRange\": [\n" + " 0,\n" + " 9223372036854775807\n" + " ]\n" + " }\n" + "}"; - OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, - 6, 7, 0, scan, Collections.emptyMap(), Collections.emptyMap()); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, + null, 6, 7, 0, scan, Collections.emptyMap(), Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); Assert.assertEquals(actualOutput, expectedOutput); @@ -67,8 +67,8 @@ public void itSerializesRequestAttributes() { Set expectedOutputs = ImmutableSet. builder().add("requestAttributes").add("\"r\": \"1\"") .add("\"2\": \"\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\"").build(); - OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, - 6, 7, 0, null, requestAttributes, Collections.emptyMap()); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, + null, 6, 7, 0, null, requestAttributes, Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); expectedOutputs.forEach(expected -> Assert.assertTrue(actualOutput.contains(expected))); @@ -76,8 +76,8 @@ ImmutableSet. builder().add("requestAttributes").add("\"r\": \"1\"") @Test public void itOmitsEmptyRequestAttributes() { - OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, - 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, + null, 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); Assert.assertFalse(actualOutput.contains("requestAttributes")); @@ -90,8 +90,8 @@ public void itSerializesConnectionAttributes() { Set expectedOutputs = ImmutableSet. builder().add("connectionAttributes").add("\"c\": \"1\"") .add("\"2\": \"\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\"").build(); - OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, - 6, 7, 0, null, Collections.emptyMap(), connectionAttributes); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, + null, 6, 7, 0, null, Collections.emptyMap(), connectionAttributes); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); expectedOutputs.forEach(expected -> Assert.assertTrue(actualOutput.contains(expected))); @@ -99,8 +99,8 @@ ImmutableSet. 
builder().add("connectionAttributes").add("\"c\": \"1\"") @Test public void itOmitsEmptyConnectionAttributes() { - OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, null, null, null, null, null, null, null, - 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); + OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, + null, 6, 7, 0, null, Collections.emptyMap(), Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); Assert.assertFalse(actualOutput.contains("connectionAttributes")); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java index af7e87483d1d..5b4fc4b2c69a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java @@ -47,6 +47,9 @@ public interface MetricsIOSource extends BaseSource { String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime"; String FS_WRITE_HISTO_KEY = "fsWriteTime"; + String SLOW_FS_READS_KEY = "fsSlowReadsCount"; + String SLOW_FS_READS_DESC = "Number of HFile reads which were slower than a configured threshold"; + String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount"; String FS_READ_TIME_HISTO_DESC = @@ -76,4 +79,6 @@ public interface MetricsIOSource extends BaseSource { * @param t time it took, in milliseconds */ void updateFsWriteTime(long t); + + void incrSlowFsRead(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java index 6ef5d180cd5e..5aca9a3d84b9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java @@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.Interns; +import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -32,6 +33,7 @@ public class MetricsIOSourceImpl extends BaseSourceImpl implements MetricsIOSour private final MetricHistogram fsReadTimeHisto; private final MetricHistogram fsPReadTimeHisto; private final MetricHistogram fsWriteTimeHisto; + private final MutableFastCounter fsSlowReads; public MetricsIOSourceImpl(MetricsIOWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); @@ -49,6 +51,7 @@ public MetricsIOSourceImpl(String metricsName, String metricsDescription, String getMetricsRegistry().newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); fsWriteTimeHisto = getMetricsRegistry().newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); + fsSlowReads = getMetricsRegistry().newCounter(SLOW_FS_READS_KEY, SLOW_FS_READS_DESC, 0L); } @Override @@ -66,6 +69,11 @@ public void updateFsWriteTime(long t) { fsWriteTimeHisto.add(t); } + @Override + public void incrSlowFsRead() { + fsSlowReads.incr(); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto index 4c275948b277..80b984e999cd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/TooSlowLog.proto @@ -53,6 +53,8 @@ message SlowLogPayload { repeated NameBytesPair connection_attribute = 18; repeated NameBytesPair request_attribute = 19; + optional int64 fs_read_time = 20; + // SLOW_LOG is RPC call slow in nature whereas LARGE_LOG is RPC call quite large. // Majority of times, slow logs are also large logs and hence, ALL is combination of // both diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index 58e6f7d01b71..4d2437d418bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -75,4 +75,8 @@ public void updateFsPreadTime(long t) { public void updateFsWriteTime(long t) { source.updateFsWriteTime(t); } + + public void incrSlowFsRead() { + source.incrSlowFsRead(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 207c99866511..84fe9387d6e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.util.BloomFilterWriter; @@ -187,12 +188,16 @@ public static final long getChecksumFailuresCount() { return CHECKSUM_FAILURES.sum(); } - public static final void updateReadLatency(long latencyMillis, boolean pread) { + public static final void updateReadLatency(long latencyMillis, boolean pread, boolean tooSlow) { + RpcServer.getCurrentCall().ifPresent(call -> call.updateFsReadTime(latencyMillis)); if (pread) { MetricsIO.getInstance().updateFsPreadTime(latencyMillis); } else { MetricsIO.getInstance().updateFsReadTime(latencyMillis); } + if (tooSlow) { + MetricsIO.getInstance().incrSlowFsRead(); + } } public static final void updateWriteLatency(long latencyMillis) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index a3ead34730fb..47c20b691b4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -1826,8 +1826,9 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, int sizeWithoutChecksum = curBlock.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); curBlock.limit(sizeWithoutChecksum); long duration = EnvironmentEdgeManager.currentTime() - startTime; + boolean tooSlow = this.readWarnTime >= 0 && duration > this.readWarnTime; if (updateMetrics) { - HFile.updateReadLatency(duration, pread); + HFile.updateReadLatency(duration, pread, tooSlow); } // The onDiskBlock will become the headerAndDataBuffer for this block. 
// If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already @@ -1839,7 +1840,7 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, hFileBlock.sanityCheckUncompressed(); } LOG.trace("Read {} in {} ms", hFileBlock, duration); - if (!LOG.isTraceEnabled() && this.readWarnTime >= 0 && duration > this.readWarnTime) { + if (!LOG.isTraceEnabled() && tooSlow) { LOG.warn("Read Block Slow: read {} cost {} ms, threshold = {} ms", hFileBlock, duration, this.readWarnTime); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 260d6e1a9803..2d06aa7c47af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -132,4 +132,8 @@ public interface RpcCall extends RpcCallContext { /** Returns A short string format of this call without possibly lengthy params */ String toShortString(); + + void updateFsReadTime(long latencyMillis); + + long getFsReadTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index d3ec4ff8c73e..0876a1fd55f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -444,14 +444,17 @@ public Pair call(RpcCall call, MonitoredRPCHandler status) int totalTime = (int) (endTime - receiveTime); if (LOG.isTraceEnabled()) { LOG.trace( - "{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, totalTime: {}", + "{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, " + + "totalTime: {}, fsReadTime: {}", CurCall.get().toString(), TextFormat.shortDebugString(result), - CurCall.get().getReceiveTime(), qTime, processingTime, totalTime); + CurCall.get().getReceiveTime(), qTime, processingTime, totalTime, + CurCall.get().getFsReadTime()); } // Use the raw request call size for now. long requestSize = call.getSize(); long responseSize = result.getSerializedSize(); long responseBlockSize = call.getBlockBytesScanned(); + long fsReadTime = call.getFsReadTime(); if (call.isClientCellBlockSupported()) { // Include the payload size in HBaseRpcController responseSize += call.getResponseCellSize(); @@ -472,13 +475,13 @@ public Pair call(RpcCall call, MonitoredRPCHandler status) // note that large responses will often also be slow. logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")", tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize, - responseBlockSize, userName); + responseBlockSize, fsReadTime, userName); if (this.namedQueueRecorder != null && this.isOnlineLogProviderEnabled) { // send logs to ring buffer owned by slowLogRecorder final String className = server == null ? 
StringUtils.EMPTY : server.getClass().getSimpleName(); this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(), - responseSize, responseBlockSize, className, tooSlow, tooLarge)); + responseSize, responseBlockSize, fsReadTime, className, tooSlow, tooLarge)); } } return new Pair<>(result, controller.cellScanner()); @@ -522,7 +525,7 @@ public Pair call(RpcCall call, MonitoredRPCHandler status) */ void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow, String clientAddress, long startTime, int processingTime, int qTime, long responseSize, - long blockBytesScanned, String userName) { + long blockBytesScanned, long fsReadTime, String userName) { final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); // base information that is reported regardless of type of call Map responseInfo = new HashMap<>(); @@ -531,6 +534,7 @@ void logResponse(Message param, String methodName, String call, boolean tooLarge responseInfo.put("queuetimems", qTime); responseInfo.put("responsesize", responseSize); responseInfo.put("blockbytesscanned", blockBytesScanned); + responseInfo.put("fsreadtime", fsReadTime); responseInfo.put("client", clientAddress); responseInfo.put("class", className); responseInfo.put("method", methodName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index ed688977b963..a2c578fd6664 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -99,6 +99,7 @@ public abstract class ServerCall implements RpcCa private long responseCellSize = 0; private long responseBlockSize = 0; + private long fsReadTimeMillis = 0; // cumulative size of serialized exceptions private long exceptionSize = 0; private final boolean retryImmediatelySupported; @@ -567,4 +568,14 @@ public int getRemotePort() { public synchronized BufferChain getResponse() { return response; } + + @Override + public void updateFsReadTime(long latencyMillis) { + fsReadTimeMillis += latencyMillis; + } + + @Override + public long getFsReadTime() { + return fsReadTimeMillis; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java index 235d82302d64..263fff66a738 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/RpcLogDetails.java @@ -42,6 +42,7 @@ public class RpcLogDetails extends NamedQueuePayload { private final String clientAddress; private final long responseSize; private final long blockBytesScanned; + private final long fsReadTime; private final String className; private final boolean isSlowLog; private final boolean isLargeLog; @@ -49,12 +50,14 @@ public class RpcLogDetails extends NamedQueuePayload { private final Map requestAttributes; public RpcLogDetails(RpcCall rpcCall, Message param, String clientAddress, long responseSize, - long blockBytesScanned, String className, boolean isSlowLog, boolean isLargeLog) { + long blockBytesScanned, long fsReadTime, String className, boolean isSlowLog, + boolean isLargeLog) { super(SLOW_LOG_EVENT); this.rpcCall = rpcCall; this.clientAddress = clientAddress; this.responseSize = responseSize; this.blockBytesScanned = blockBytesScanned; + 
this.fsReadTime = fsReadTime; this.className = className; this.isSlowLog = isSlowLog; this.isLargeLog = isLargeLog; @@ -92,6 +95,10 @@ public long getBlockBytesScanned() { return blockBytesScanned; } + public long getFsReadTime() { + return fsReadTime; + } + public String getClassName() { return className; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java index fb29b8563ef7..ea4e286bf43f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java @@ -124,6 +124,7 @@ public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) { final String clientAddress = rpcLogDetails.getClientAddress(); final long responseSize = rpcLogDetails.getResponseSize(); final long blockBytesScanned = rpcLogDetails.getBlockBytesScanned(); + final long fsReadTime = rpcLogDetails.getFsReadTime(); final String className = rpcLogDetails.getClassName(); final TooSlowLog.SlowLogPayload.Type type = getLogType(rpcLogDetails); if (type == null) { @@ -168,7 +169,8 @@ public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) { .setProcessingTime(processingTime).setQueueTime(qTime) .setRegionName(slowLogParams != null ? slowLogParams.getRegionName() : StringUtils.EMPTY) .setResponseSize(responseSize).setBlockBytesScanned(blockBytesScanned) - .setServerClass(className).setStartTime(startTime).setType(type).setUserName(userName) + .setFsReadTime(fsReadTime).setServerClass(className).setStartTime(startTime).setType(type) + .setUserName(userName) .addAllRequestAttribute(buildNameBytesPairs(rpcLogDetails.getRequestAttributes())) .addAllConnectionAttribute(buildNameBytesPairs(rpcLogDetails.getConnectionAttributes())); if (slowLogParams != null && slowLogParams.getScan() != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 0fe6f6476a6c..05d7c2e56055 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3454,6 +3454,9 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan // from block size progress before writing into the response scannerContext.getMetrics().countOfBlockBytesScanned .set(scannerContext.getBlockSizeProgress()); + if (rpcCall != null) { + scannerContext.getMetrics().fsReadTime.set(rpcCall.getFsReadTime()); + } Map metrics = scannerContext.getMetrics().getMetricsMap(); ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder(); NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index b80cd207683c..6ac87ce03026 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static 
org.junit.Assert.assertTrue; import java.io.IOException; @@ -195,6 +196,18 @@ public void testRowsSeenMetric() throws Exception { } } + @Test + public void testFsReadTimeMetric() throws Exception { + // write some new puts and flush, as an easy way to ensure the read blocks are not cached + // so that we go into the fs write code path + List puts = createPuts(ROWS, FAMILIES, QUALIFIERS, VALUE); + TABLE.put(puts); + TEST_UTIL.flush(TABLE_NAME); + Scan scan = new Scan(); + scan.setScanMetricsEnabled(true); + testMetric(scan, ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME, 0, CompareOperator.GREATER); + } + private void testRowsSeenMetric(Scan baseScan) throws Exception { Scan scan; scan = new Scan(baseScan); @@ -333,6 +346,11 @@ private void testRowsFilteredMetric(Scan baseScan, Filter filter, int expectedNu * @throws Exception on unexpected failure */ private void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception { + testMetric(scan, metricKey, expectedValue, CompareOperator.EQUAL); + } + + private void testMetric(Scan scan, String metricKey, long expectedValue, + CompareOperator compareOperator) throws Exception { assertTrue("Scan should be configured to record metrics", scan.isScanMetricsEnabled()); ResultScanner scanner = TABLE.getScanner(scan); // Iterate through all the results @@ -341,11 +359,17 @@ private void testMetric(Scan scan, String metricKey, long expectedValue) throws } scanner.close(); ScanMetrics metrics = scanner.getScanMetrics(); - assertTrue("Metrics are null", metrics != null); + assertNotNull("Metrics are null", metrics); assertTrue("Metric : " + metricKey + " does not exist", metrics.hasCounter(metricKey)); final long actualMetricValue = metrics.getCounter(metricKey).get(); - assertEquals( - "Metric: " + metricKey + " Expected: " + expectedValue + " Actual: " + actualMetricValue, - expectedValue, actualMetricValue); + if (compareOperator == CompareOperator.EQUAL) { + assertEquals( + "Metric: " + metricKey + " Expected: " + expectedValue + " Actual: " + actualMetricValue, + expectedValue, actualMetricValue); + } else { + assertTrue( + "Metric: " + metricKey + " Expected: > " + expectedValue + " Actual: " + actualMetricValue, + actualMetricValue > expectedValue); + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 35a1757115c9..00953353187e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -667,13 +667,13 @@ public void testOnlineSlowLogConnectionAttributes() throws Exception { static RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className, int forcedParamIndex) { RpcCall rpcCall = getRpcCall(userName, forcedParamIndex); - return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, className, true, + return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, 0, className, true, true); } static RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className) { RpcCall rpcCall = getRpcCall(userName); - return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, className, true, + return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, 0, className, true, true); } @@ -685,8 +685,8 @@ private static RpcLogDetails 
getRpcLogDetailsOfScan() { private RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className, boolean isSlowLog, boolean isLargeLog) { RpcCall rpcCall = getRpcCall(userName); - return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, className, isSlowLog, - isLargeLog); + return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, 0, 0, className, + isSlowLog, isLargeLog); } private static RpcCall getRpcCall(String userName) { @@ -859,6 +859,16 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } + + @Override + public void updateFsReadTime(long latencyMillis) { + + } + + @Override + public long getFsReadTime() { + return 0; + } }; return rpcCall; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java index 8a93f2d0ff54..67d8a2579097 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java @@ -80,7 +80,7 @@ public void itDeepCopiesRpcLogDetailsParams() throws IOException { ProtobufUtil.mergeFrom(messageBuilder, cis, buffer.capacity()); Message message = messageBuilder.build(); RpcLogDetails rpcLogDetails = - new RpcLogDetails(getRpcCall(message), message, null, 0L, 0L, null, true, false); + new RpcLogDetails(getRpcCall(message), message, null, 0L, 0L, 0, null, true, false); // log's scan should be equal ClientProtos.Scan logScan = ((ClientProtos.ScanRequest) rpcLogDetails.getParam()).getScan(); @@ -258,6 +258,16 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } + + @Override + public void updateFsReadTime(long latencyMillis) { + + } + + @Override + public long getFsReadTime() { + return 0; + } }; return rpcCall; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java index da3d97547645..fdc3e288bfed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java @@ -118,6 +118,6 @@ public void testLogLargeBlockBytesScanned() throws IOException { record.getBlockBytesScanned() >= 100); assertTrue("expected " + record.getResponseSize() + " to be < 100", record.getResponseSize() < 100); - + assertTrue("expected " + record.getFsReadTime() + " to be > 0", record.getFsReadTime() > 0); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index 305f0e29e952..d069c2560a55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -320,6 +320,16 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } + + @Override + public void updateFsReadTime(long latencyMillis) { + + } + + @Override + public long getFsReadTime() { + return 0; + } }; } } diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index 76bc96df05c5..a02f944e12a7 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -1647,6 +1647,7 @@ public static LogQueryFilter getSlowLogQueryFromThrift(TLogQueryFilter tLogQuery tOnlineLogRecord.setRegionName(slowLogRecord.getRegionName()); tOnlineLogRecord.setResponseSize(slowLogRecord.getResponseSize()); tOnlineLogRecord.setBlockBytesScanned(slowLogRecord.getBlockBytesScanned()); + tOnlineLogRecord.setFsReadTime(slowLogRecord.getFsReadTime()); tOnlineLogRecord.setServerClass(slowLogRecord.getServerClass()); tOnlineLogRecord.setStartTime(slowLogRecord.getStartTime()); tOnlineLogRecord.setUserName(slowLogRecord.getUserName()); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java index 672b8b96d551..c3d6cba7ad20 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.client.OnlineLogRecord */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2023-02-04") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-01-12") public class TOnlineLogRecord implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOnlineLogRecord"); @@ -30,6 +30,7 @@ public class TOnlineLogRecord implements org.apache.thrift.TBase byName = new java.util.HashMap(); @@ -112,6 +115,8 @@ public static _Fields findByThriftId(int fieldId) { return REGION_NAME; case 15: // BLOCK_BYTES_SCANNED return BLOCK_BYTES_SCANNED; + case 16: // FS_READ_TIME + return FS_READ_TIME; default: return null; } @@ -161,8 +166,9 @@ public java.lang.String getFieldName() { private static final int __MULTIMUTATIONSCOUNT_ISSET_ID = 5; private static final int __MULTISERVICECALLS_ISSET_ID = 6; private static final int __BLOCKBYTESSCANNED_ISSET_ID = 7; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.REGION_NAME,_Fields.BLOCK_BYTES_SCANNED}; + private static final int __FSREADTIME_ISSET_ID = 8; + private short __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.REGION_NAME,_Fields.BLOCK_BYTES_SCANNED,_Fields.FS_READ_TIME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -196,6 +202,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.BLOCK_BYTES_SCANNED, new org.apache.thrift.meta_data.FieldMetaData("blockBytesScanned", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.FS_READ_TIME, new 
org.apache.thrift.meta_data.FieldMetaData("fsReadTime", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOnlineLogRecord.class, metaDataMap); } @@ -275,6 +283,7 @@ public TOnlineLogRecord(TOnlineLogRecord other) { this.regionName = other.regionName; } this.blockBytesScanned = other.blockBytesScanned; + this.fsReadTime = other.fsReadTime; } public TOnlineLogRecord deepCopy() { @@ -306,6 +315,8 @@ public void clear() { this.regionName = null; setBlockBytesScannedIsSet(false); this.blockBytesScanned = 0; + setFsReadTimeIsSet(false); + this.fsReadTime = 0; } public long getStartTime() { @@ -667,6 +678,29 @@ public void setBlockBytesScannedIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOCKBYTESSCANNED_ISSET_ID, value); } + public long getFsReadTime() { + return this.fsReadTime; + } + + public TOnlineLogRecord setFsReadTime(long fsReadTime) { + this.fsReadTime = fsReadTime; + setFsReadTimeIsSet(true); + return this; + } + + public void unsetFsReadTime() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __FSREADTIME_ISSET_ID); + } + + /** Returns true if field fsReadTime is set (has been assigned a value) and false otherwise */ + public boolean isSetFsReadTime() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __FSREADTIME_ISSET_ID); + } + + public void setFsReadTimeIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __FSREADTIME_ISSET_ID, value); + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case START_TIME: @@ -789,6 +823,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case FS_READ_TIME: + if (value == null) { + unsetFsReadTime(); + } else { + setFsReadTime((java.lang.Long)value); + } + break; + } } @@ -840,6 +882,9 @@ public java.lang.Object getFieldValue(_Fields field) { case BLOCK_BYTES_SCANNED: return getBlockBytesScanned(); + case FS_READ_TIME: + return getFsReadTime(); + } throw new java.lang.IllegalStateException(); } @@ -881,6 +926,8 @@ public boolean isSet(_Fields field) { return isSetRegionName(); case BLOCK_BYTES_SCANNED: return isSetBlockBytesScanned(); + case FS_READ_TIME: + return isSetFsReadTime(); } throw new java.lang.IllegalStateException(); } @@ -1033,6 +1080,15 @@ public boolean equals(TOnlineLogRecord that) { return false; } + boolean this_present_fsReadTime = true && this.isSetFsReadTime(); + boolean that_present_fsReadTime = true && that.isSetFsReadTime(); + if (this_present_fsReadTime || that_present_fsReadTime) { + if (!(this_present_fsReadTime && that_present_fsReadTime)) + return false; + if (this.fsReadTime != that.fsReadTime) + return false; + } + return true; } @@ -1086,6 +1142,10 @@ public int hashCode() { if (isSetBlockBytesScanned()) hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(blockBytesScanned); + hashCode = hashCode * 8191 + ((isSetFsReadTime()) ? 
131071 : 524287); + if (isSetFsReadTime()) + hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(fsReadTime); + return hashCode; } @@ -1247,6 +1307,16 @@ public int compareTo(TOnlineLogRecord other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetFsReadTime(), other.isSetFsReadTime()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFsReadTime()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fsReadTime, other.fsReadTime); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1359,6 +1429,12 @@ public java.lang.String toString() { sb.append(this.blockBytesScanned); first = false; } + if (isSetFsReadTime()) { + if (!first) sb.append(", "); + sb.append("fsReadTime:"); + sb.append(this.fsReadTime); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1549,6 +1625,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 16: // FS_READ_TIME + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.fsReadTime = iprot.readI64(); + struct.setFsReadTimeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1648,6 +1732,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TOnlineLogRecord s oprot.writeI64(struct.blockBytesScanned); oprot.writeFieldEnd(); } + if (struct.isSetFsReadTime()) { + oprot.writeFieldBegin(FS_READ_TIME_FIELD_DESC); + oprot.writeI64(struct.fsReadTime); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1685,13 +1774,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord st if (struct.isSetBlockBytesScanned()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetFsReadTime()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetRegionName()) { oprot.writeString(struct.regionName); } if (struct.isSetBlockBytesScanned()) { oprot.writeI64(struct.blockBytesScanned); } + if (struct.isSetFsReadTime()) { + oprot.writeI64(struct.fsReadTime); + } } @Override @@ -1723,7 +1818,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord str struct.setMultiMutationsCountIsSet(true); struct.multiServiceCalls = iprot.readI32(); struct.setMultiServiceCallsIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.regionName = iprot.readString(); struct.setRegionNameIsSet(true); @@ -1732,6 +1827,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord str struct.blockBytesScanned = iprot.readI64(); struct.setBlockBytesScannedIsSet(true); } + if (incoming.get(2)) { + struct.fsReadTime = iprot.readI64(); + struct.setFsReadTimeIsSet(true); + } } } diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift index a32f266cf313..ed3fdf32b973 100644 --- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift +++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift @@ -499,6 +499,7 @@ struct TOnlineLogRecord { 13: required i32 multiServiceCalls 14: optional string regionName 15: optional i64 
blockBytesScanned + 16: optional i64 fsReadTime } // From 0c48e5a901f2ad0ef6bab689eb32e6bbd5a31c1c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 24 Jan 2024 22:00:38 +0800 Subject: [PATCH 221/514] HBASE-28326 All nightly jobs are failing (#5646) Use downloads.a.o instead of a.o/dist Signed-off-by: Xin Sun Signed-off-by: Nihal Jain --- dev-support/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index bb79ea8928ab..7d2ef32df6eb 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -91,7 +91,7 @@ pipeline { rm -rf "${YETUS_DIR}" "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-yetus" \ - --keys 'https://www.apache.org/dist/yetus/KEYS' \ + --keys 'https://downloads.apache.org/yetus/KEYS' \ --verify-tar-gz \ "${WORKSPACE}/yetus-${YETUS_RELEASE}-bin.tar.gz" \ "yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz" @@ -138,7 +138,7 @@ pipeline { echo "Ensure we have a copy of Hadoop ${HADOOP2_VERSION}" "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-hadoop-2" \ - --keys 'http://www.apache.org/dist/hadoop/common/KEYS' \ + --keys 'https://downloads.apache.org/hadoop/common/KEYS' \ --verify-tar-gz \ "${WORKSPACE}/hadoop-${HADOOP2_VERSION}-bin.tar.gz" \ "hadoop/common/hadoop-${HADOOP2_VERSION}/hadoop-${HADOOP2_VERSION}.tar.gz" @@ -166,7 +166,7 @@ pipeline { echo "Ensure we have a copy of Hadoop ${HADOOP3_VERSION}" "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-hadoop-3" \ - --keys 'http://www.apache.org/dist/hadoop/common/KEYS' \ + --keys 'https://downloads.apache.org/hadoop/common/KEYS' \ --verify-tar-gz \ "${WORKSPACE}/hadoop-${HADOOP3_VERSION}-bin.tar.gz" \ "hadoop/common/hadoop-${HADOOP3_VERSION}/hadoop-${HADOOP3_VERSION}.tar.gz" From 354a3a2774d203f9299b3368c1ef06a5e4234808 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 24 Jan 2024 22:02:00 +0800 Subject: [PATCH 222/514] HBASE-28322 Attach the design doc in HBASE-26220 to our code base (#5640) Signed-off-by: Yi Mei Signed-off-by: Xin Sun --- ...vers to sync the list for bootstrap node.pdf | Bin 0 -> 43316 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 dev-support/design-docs/HBASE-26220 Use P2P communicate between region servers to sync the list for bootstrap node.pdf diff --git a/dev-support/design-docs/HBASE-26220 Use P2P communicate between region servers to sync the list for bootstrap node.pdf b/dev-support/design-docs/HBASE-26220 Use P2P communicate between region servers to sync the list for bootstrap node.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7fcd1f6b9cd991cf0c7d3dc63d471e535ad64d9f GIT binary patch literal 43316 zcmd3NV~}RevhK8P`)y5Q+O};Q)3$Bfwmogzwrx+lr|rJ|eP^H8`|KU}#`$?8-iTFi z#Zy_Cm07iFWz|C}CnQ2mPs0RFI)8Su3r!E81K8-BLvwM_Dw#Q28Ue_~1o#z&sTo)p z80Y}X4n_bu204I%jg^&?wV8pQqY*&g$kD~f$Qod8WMXDx4RA2BcQ&$j065wJ9Ner8 z0FI_c0829mM}V=7JwV^a#?isiUe6X_ZDVLe0Zl7sZ)50WU}O&3>Pr<_j{Z;%+JH~%%X`lz7ho%*f2GGjb*jwpY{u9Lf zPmqY2rQ=s8v?7*Y8Vea2e02^@D`jMD;%EwBW?*LL;Q@T9Wu#{X?V5R^y&g@{4Cj?o zok|c&6rWGShuG8Jbes9}6Mz8%bc1;O=_ki+HEmq7!a?bD?+|$nQnLLip6&B8t=O$Y!K=9N z_IA6S^YQ2I*d|-9<tkR5}>!$s*BcyJyp zKbK-@q#8p_l6tuQ`s#;JC z4cmVJw?$>{58^lm>I?Unu~e;*NqCcRqqnq6 z8H3-_EFA!8>EerQx%YEz+d*&^n6kC3Bp|$9m>AiVm_<{_bcxVnVnZqt?>nJ95A+Qv 
z7XMj~8^ZbDy4?2-{#QM22ozX?|J3CM67(N-xVabuJCh3 Date: Thu, 25 Jan 2024 17:06:55 +0800 Subject: [PATCH 223/514] HBASE-28329 Output hbase logs to working directory in client integration test (#5648) Signed-off-by: Nihal Jain --- dev-support/hbase_nightly_pseudo-distributed-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/hbase_nightly_pseudo-distributed-test.sh b/dev-support/hbase_nightly_pseudo-distributed-test.sh index ffe630865925..8290f06110f3 100755 --- a/dev-support/hbase_nightly_pseudo-distributed-test.sh +++ b/dev-support/hbase_nightly_pseudo-distributed-test.sh @@ -324,7 +324,7 @@ redirect_and_run "${working_dir}/hadoop_cluster_smoke" \ "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -ls -R / echo "Starting up HBase" -HBASE_CONF_DIR="${working_dir}/hbase-conf/" "${component_install}/bin/start-hbase.sh" +HBASE_CONF_DIR="${working_dir}/hbase-conf/" HBASE_LOG_DIR="${working_dir}" "${component_install}/bin/start-hbase.sh" sleep_time=2 until "${component_install}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive >"${working_dir}/waiting_hbase_startup.log" 2>&1 < Date: Thu, 25 Jan 2024 17:07:30 +0800 Subject: [PATCH 224/514] HBASE-28255 Correcting spelling errors or annotations with non-standard spelling (#5577) Co-authored-by: mazhengxuan Co-authored-by: Duo Zhang Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/Abortable.java | 2 +- .../hadoop/hbase/ipc/AbstractRpcClient.java | 2 +- .../example/ZooKeeperScanPolicyObserver.java | 2 +- .../AbstractProcedureScheduler.java | 2 +- .../replication/AbstractPeerProcedure.java | 2 +- .../hbase/regionserver/AbstractMemStore.java | 2 +- .../regionserver/AbstractMultiFileWriter.java | 2 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 54 +++++++++---------- .../wal/AbstractProtobufWALReader.java | 12 ++--- 9 files changed, 40 insertions(+), 40 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index b9736d573454..b0a5a86d50bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -37,7 +37,7 @@ public interface Abortable { void abort(String why, Throwable e); /** - * It just call another abort method and the Throwable parameter is null. + * It just calls another abort method and the Throwable parameter is null. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index fcded9f5b69d..5926539d0679 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -209,7 +209,7 @@ private void cleanupIdleConnections() { for (T conn : connections.values()) { // Remove connection if it has not been chosen by anyone for more than maxIdleTime, and the // connection itself has already shutdown. The latter check is because we may still - // have some pending calls on connection so we should not shutdown the connection outside. + // have some pending calls on connection, so we should not shut down the connection outside. // The connection itself will disconnect if there is no pending call for maxIdleTime. 
if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) { if (LOG.isTraceEnabled()) { diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java index 53288be872ea..fa7ccf737365 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java @@ -41,7 +41,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is an example showing how a RegionObserver could configured via ZooKeeper in order to + * This is an example showing how a RegionObserver could be configured via ZooKeeper in order to * control a Region compaction, flush, and scan policy. This also demonstrated the use of shared * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state. See * {@link RegionCoprocessorEnvironment#getSharedData()}. diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 4d1d5c1ccd9a..61f73544b1bb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -236,7 +236,7 @@ public long getNullPollCalls() { // ========================================================================== /** - * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() + * Wake up all the given events. Note that we first take scheduler lock and then wakeInternal() * synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to * wake/suspend events. * @param events the list of events to wake diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java index cd4cca0f9186..b3b01f675c7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java @@ -52,7 +52,7 @@ public abstract class AbstractPeerProcedure extends AbstractPeerNoLockPr // The sleep interval when waiting table to be enabled or disabled. protected static final int SLEEP_INTERVAL_MS = 1000; - // used to keep compatible with old client where we can only returns after updateStorage. + // used to keep compatible with old client where we can only return after updateStorage. 
protected ProcedurePrepareLatch latch; protected AbstractPeerProcedure() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 5cd3a92e5b69..62ff6f9a92fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -157,7 +157,7 @@ protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing mem Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false); boolean mslabUsed = (toAdd != cell); // This cell data is backed by the same byte[] where we read request in RPC(See - // HBASE-15180). By default MSLAB is ON and we might have copied cell to MSLAB area. If + // HBASE-15180). By default, MSLAB is ON and we might have copied cell to MSLAB area. If // not we must do below deep copy. Or else we will keep referring to the bigger chunk of // memory and prevent it from getting GCed. // Copy to MSLAB would not have happened if diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java index a02b05f66ba3..6370d6a79ccb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java @@ -64,7 +64,7 @@ public void init(StoreScanner sourceScanner, WriterFactory factory) { * Commit all writers. *

* Notice that here we use the same maxSeqId for all output files since we haven't - * find an easy to find enough sequence ids for different output files in some corner cases. See + * found an easy to find enough sequence ids for different output files in some corner cases. See * comments in HBASE-15400 for more details. */ public List commitWriters(long maxSeqId, boolean majorCompaction) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 1a5b5384b01f..ef25068512f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -259,8 +259,8 @@ public abstract class AbstractFSWAL implements WAL { protected final long blocksize; /* - * If more than this many logs, force flush of oldest region to oldest edit goes to disk. If too - * many and we crash, then will take forever replaying. Keep the number of logs tidy. + * If more than this many logs, force flush of oldest region to the oldest edit goes to disk. If + * too many and we crash, then will take forever replaying. Keep the number of logs tidy. */ protected final int maxLogs; @@ -336,7 +336,7 @@ private static final class WALProps { /** * The log file size. Notice that the size may not be accurate if we do asynchronous close in - * sub classes. + * subclasses. */ private final long logSize; @@ -346,7 +346,7 @@ private static final class WALProps { private final long rollTimeNs; /** - * If we do asynchronous close in sub classes, it is possible that when adding WALProps to the + * If we do asynchronous close in subclasses, it is possible that when adding WALProps to the * rolled map, the file is not closed yet, so in cleanOldLogs we should not archive this file, * for safety. */ @@ -404,9 +404,9 @@ private static final class WALProps { protected Supplier hasConsumerTask; private static final int MAX_EPOCH = 0x3FFFFFFF; - // the lowest bit is waitingRoll, which means new writer is created and we are waiting for old + // the lowest bit is waitingRoll, which means new writer is created, and we are waiting for old // writer to be closed. - // the second lowest bit is writerBroken which means the current writer is broken and rollWriter + // the second-lowest bit is writerBroken which means the current writer is broken and rollWriter // is needed. // all other bits are the epoch number of the current writer, this is used to detect whether the // writer is still the one when you issue the sync. @@ -807,7 +807,7 @@ public int getNumLogFiles() { * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the * first (oldest) WAL, and return those regions which should be flushed so that it can be * let-go/'archived'. - * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file. 
+ * @return stores of regions (encodedRegionNames) to flush in order to archive the oldest WAL file */ Map> findRegionsToForceFlush() throws IOException { Map> regions = null; @@ -861,7 +861,7 @@ private synchronized void cleanOldLogs() { long now = System.nanoTime(); boolean mayLogTooOld = nextLogTooOldNs <= now; ArrayList regionsBlockingWal = null; - // For each log file, look at its Map of regions to highest sequence id; if all sequence ids + // For each log file, look at its Map of regions to the highest sequence id; if all sequence ids // are older than what is currently in memory, the WAL can be GC'd. for (Map.Entry e : this.walFile2Props.entrySet()) { if (!e.getValue().closed) { @@ -1084,7 +1084,7 @@ private Map> rollWriterInternal(boolean force) throws IOExc try { Path oldPath = getOldPath(); Path newPath = getNewPath(); - // Any exception from here on is catastrophic, non-recoverable so we currently abort. + // Any exception from here on is catastrophic, non-recoverable, so we currently abort. W nextWriter = this.createWriterInstance(fs, newPath); if (remoteFs != null) { // create a remote wal if necessary @@ -1107,7 +1107,7 @@ private Map> rollWriterInternal(boolean force) throws IOExc regionsToFlush = findRegionsToForceFlush(); } } catch (CommonFSUtils.StreamLacksCapabilityException exception) { - // If the underlying FileSystem can't do what we ask, treat as IO failure so + // If the underlying FileSystem can't do what we ask, treat as IO failure, so // we'll abort. throw new IOException( "Underlying FileSystem can't meet stream requirements. See RS log " + "for details.", @@ -1195,9 +1195,9 @@ public Void call() throws Exception { throw new IOException(e.getCause()); } } finally { - // in shutdown we may call cleanOldLogs so shutdown this executor in the end. - // In sync replication implementation, we may shutdown a WAL without shutting down the whole - // region server, if we shutdown this executor earlier we may get reject execution exception + // in shutdown, we may call cleanOldLogs so shutdown this executor in the end. + // In sync replication implementation, we may shut down a WAL without shutting down the whole + // region server, if we shut down this executor earlier we may get reject execution exception // and abort the region server logArchiveExecutor.shutdown(); } @@ -1467,8 +1467,8 @@ private static int epoch(int epochAndState) { // return whether we have successfully set readyForRolling to true. private boolean trySetReadyForRolling() { // Check without holding lock first. Usually we will just return here. - // waitingRoll is volatile and unacedEntries is only accessed inside event loop so it is safe to - // check them outside the consumeLock. + // waitingRoll is volatile and unacedEntries is only accessed inside event loop, so it is safe + // to check them outside the consumeLock. if (!waitingRoll(epochAndState) || !unackedAppends.isEmpty()) { return false; } @@ -1532,13 +1532,13 @@ private void syncCompleted(long epochWhenSync, W writer, long processedTxid, lon // changed, i.e, we have already rolled the writer, or the writer is already broken, we should // just skip here, to avoid mess up the state or accidentally release some WAL entries and // cause data corruption. - // The syncCompleted call is on the critical write path so we should try our best to make it + // The syncCompleted call is on the critical write path, so we should try our best to make it // fast. So here we do not hold consumeLock, for increasing performance. 
It is safe because // there are only 3 possible situations: // 1. For normal case, the only place where we change epochAndState is when rolling the writer. // Before rolling actually happen, we will only change the state to waitingRoll which is another // bit than writerBroken, and when we actually change the epoch, we can make sure that there is - // no out going sync request. So we will always pass the check here and there is no problem. + // no outgoing sync request. So we will always pass the check here and there is no problem. // 2. The writer is broken, but we have not called syncFailed yet. In this case, since // syncFailed and syncCompleted are executed in the same thread, we will just face the same // situation with #1. @@ -1706,7 +1706,7 @@ private void appendAndSync() throws IOException { for (Iterator iter = toWriteAppends.iterator(); iter.hasNext();) { FSWALEntry entry = iter.next(); /** - * For {@link FSHog},here may throws IOException,but for {@link AsyncFSWAL}, here would not + * For {@link FSHog},here may throw IOException,but for {@link AsyncFSWAL}, here would not * throw any IOException. */ boolean appended = appendEntry(writer, entry); @@ -1753,7 +1753,7 @@ private void appendAndSync() throws IOException { } if (writer.getLength() == fileLengthAtLastSync) { // we haven't written anything out, just advance the highestSyncedSequence since we may only - // stamped some region sequence id. + // stamp some region sequence id. if (unackedAppends.isEmpty()) { highestSyncedTxid.set(highestProcessedAppendTxid); finishSync(); @@ -1761,7 +1761,7 @@ private void appendAndSync() throws IOException { } return; } - // reach here means that we have some unsynced data but haven't reached the batch size yet + // reach here means that we have some unsynced data but haven't reached the batch size yet, // but we will not issue a sync directly here even if there are sync requests because we may // have some new data in the ringbuffer, so let's just return here and delay the decision of // whether to issue a sync in the caller method. @@ -1876,12 +1876,12 @@ private boolean shouldScheduleConsumer() { * have its region edit/sequence id assigned else it messes up our unification of mvcc and * sequenceid. On return key will have the region edit/sequence id filled in. *

- * NOTE: This append, at a time that is usually after this call returns, starts an mvcc + * NOTE: This appends, at a time that is usually after this call returns, starts a mvcc * transaction by calling 'begin' wherein which we assign this update a sequenceid. At assignment * time, we stamp all the passed in Cells inside WALEdit with their sequenceId. You must * 'complete' the transaction this mvcc transaction by calling * MultiVersionConcurrencyControl#complete(...) or a variant otherwise mvcc will get stuck. Do it - * in the finally of a try/finally block within which this append lives and any subsequent + * in the finally of a try/finally block within which this appends lives and any subsequent * operations like sync or update of memstore, etc. Get the WriteEntry to pass mvcc out of the * passed in WALKey walKey parameter. Be warned that the WriteEntry is not * immediately available on return from this method. It WILL be available subsequent to a sync of @@ -2034,14 +2034,14 @@ protected final void closeWriter(W writer, Path path) { * Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer * will begin to work before returning from this method. If we clear the flag after returning from * this call, we may miss a roll request. The implementation class should choose a proper place to - * clear the {@link #rollRequested} flag so we do not miss a roll request, typically before you + * clear the {@link #rollRequested} flag, so we do not miss a roll request, typically before you * start writing to the new writer. */ protected void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException { Preconditions.checkNotNull(nextWriter); waitForSafePoint(); /** - * For {@link FSHLog},here would shutdown {@link FSHLog.SyncRunner}. + * For {@link FSHLog},here would shut down {@link FSHLog.SyncRunner}. */ doCleanUpResources(); // we will call rollWriter in init method, where we want to create the first writer and @@ -2084,7 +2084,7 @@ protected void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) throws protected void doShutdown() throws IOException { waitForSafePoint(); /** - * For {@link FSHLog},here would shutdown {@link FSHLog.SyncRunner}. + * For {@link FSHLog},here would shut down {@link FSHLog.SyncRunner}. */ doCleanUpResources(); if (this.writer != null) { @@ -2214,7 +2214,7 @@ public void checkLogLowReplication(long checkInterval) { // So here we need to skip the creation of remote writer and make it possible to write the region // close marker. // Setting markerEdit only to true is for transiting from A to S, where we need to give up writing - // any pending wal entries as they will be discarded. The remote cluster will replicated the + // any pending wal entries as they will be discarded. The remote cluster will replicate the // correct data back later. We still need to allow writing marker edits such as close region event // to allow closing a region. @Override @@ -2261,7 +2261,7 @@ private static void usage() { } /** - * Pass one or more log file names and it will either dump out a text version on + * Pass one or more log file names, and it will either dump out a text version on * stdout or split the specified log files. 
*/ public static void main(String[] args) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java index f5e65e08c84c..5d51750ba5f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java @@ -121,7 +121,7 @@ public abstract class AbstractProtobufWALReader * Get or create the input stream used by cell decoder. *

* For implementing replication, we may need to limit the bytes we can read, so here we provide a - * method so sub classes can wrap the original input stream. + * method so subclasses can wrap the original input stream. */ protected abstract InputStream getCellCodecInputStream(FSDataInputStream stream); @@ -366,7 +366,7 @@ protected final void readTrailer(FSDataInputStream stream, FileStatus stat) thro this.fileLength = stat.getLen(); this.walEditsStopOffset = this.fileLength; long currentPos = stream.getPos(); - // we will reset walEditsStopOffset if trailer if available + // we will reset walEditsStopOffset if trailer is available trailerPresent = setTrailerIfPresent(stream); if (currentPos != stream.getPos()) { // seek back @@ -509,18 +509,18 @@ protected final IOException extractHiddenEof(Exception ex) { * This is used to determine whether we have already reached the WALTrailer. As the size and magic * are at the end of the WAL file, it is possible that these two options are missing while * writing, so we will consider there is no trailer. And when we actually reach the WALTrailer, we - * will try to decode it as WALKey and we will fail but the error could be vary as it is parsing + * will try to decode it as WALKey and we will fail but the error could be varied as it is parsing * WALTrailer actually. * @return whether this is a WALTrailer and we should throw EOF to upper layer the file is done */ protected final boolean isWALTrailer(long startPosition) throws IOException { - // We have nothing in the WALTrailer PB message now so its size is just a int length size and a + // We have nothing in the WALTrailer PB message now so its size is just an int length size and a // magic at the end int trailerSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT; if (fileLength - startPosition >= trailerSize) { // We still have more than trailerSize bytes before reaching the EOF so this is not a trailer. // We also test for == here because if this is a valid trailer, we can read it while opening - // the reader so we should not reach here + // the reader, so we should not reach here return false; } inputStream.seek(startPosition); @@ -548,7 +548,7 @@ protected final boolean isWALTrailer(long startPosition) throws IOException { return false; } } - // in fact we should not reach here, as this means the trailer bytes are all matched and + // in fact, we should not reach here, as this means the trailer bytes are all matched and // complete, then we should not call this method... 
return true; } From b87b05c847f00c292664d894c21f83c73d48460d Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 25 Jan 2024 10:16:27 +0100 Subject: [PATCH 225/514] Revert "HBASE-28325 Enable infra automation to comment on a Jira when a new PR is posted" --- .asf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index 3b7cf932e3b6..a991e8528d78 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -38,4 +38,4 @@ notifications: commits: commits@hbase.apache.org issues: issues@hbase.apache.org pullrequests: issues@hbase.apache.org - jira_options: link comment + jira_options: link From ef1ddc94c727528a644e2d94be28e845ec72d7a4 Mon Sep 17 00:00:00 2001 From: eab148 <54775485+eab148@users.noreply.github.com> Date: Thu, 25 Jan 2024 08:17:25 -0500 Subject: [PATCH 226/514] HBASE-28327 Add remove(String key, Metric metric) method to MetricRegistry interface (#5647) Co-authored-by: Evie Boland Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/metrics/MetricRegistry.java | 8 ++++++++ .../metrics/impl/MetricRegistryImpl.java | 5 +++++ .../metrics/impl/TestMetricRegistryImpl.java | 19 +++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java index b70526e1c5a9..96f4b313d794 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java @@ -96,6 +96,14 @@ public interface MetricRegistry extends MetricSet { */ boolean remove(String name); + /** + * Removes the metric with the given name only if it is registered to the provided metric. + * @param name the name of the metric + * @param metric the metric expected to be registered to the given name + * @return true if the metric is removed. + */ + boolean remove(String name, Metric metric); + /** * Return the MetricRegistryInfo object for this registry. * @return MetricRegistryInfo describing the registry. 
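A minimal usage sketch of the two-argument remove introduced above. The registry handle, the metric name and the getRegistry() helper are hypothetical illustrations; only register(String, Metric) and the new remove(String, Metric) come from this change (see the MetricRegistryImpl and TestMetricRegistryImpl hunks below).

MetricRegistry registry = getRegistry();   // assumed helper returning an already-created registry
Counter requests = new CounterImpl();      // org.apache.hadoop.hbase.metrics.impl.CounterImpl
registry.register("requests", requests);
// Later, during cleanup: the mapping is dropped only if "requests" is still bound to this exact
// instance, so a metric re-registered under the same name by another component is left in place.
boolean removed = registry.remove("requests", requests);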
diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java index 1c8927b15b3a..caea02740c60 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java @@ -114,6 +114,11 @@ public boolean remove(String name) { return metrics.remove(name) != null; } + @Override + public boolean remove(String name, Metric metric) { + return metrics.remove(name, metric); + } + @Override public MetricRegistryInfo getMetricRegistryInfo() { return info; diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java index 56b3f0d6a9ee..8cae06361204 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.metrics.impl; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -141,4 +142,22 @@ public void testGetMetrics() { assertEquals(gauge, metrics.get("mygauge")); assertEquals(timer, metrics.get("mytimer")); } + + @Test + public void testRemove() { + CounterImpl counter1 = new CounterImpl(); + CounterImpl counter2 = new CounterImpl(); + registry.register("mycounter", counter1); + + boolean removed = registry.remove("mycounter", counter2); + Optional metric = registry.get("mycounter"); + assertFalse(removed); + assertTrue(metric.isPresent()); + assertEquals(metric.get(), counter1); + + removed = registry.remove("mycounter"); + metric = registry.get("mycounter"); + assertTrue(removed); + assertFalse(metric.isPresent()); + } } From 1c5b5ffcf5de8edeb28372f3c828dc0478e700ae Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 25 Jan 2024 09:30:47 -0500 Subject: [PATCH 227/514] HBASE-28315 Remove noisy WARN from trying to construct MetricsServlet (#5651) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/http/HttpServer.java | 19 ++++++++++++------- .../hadoop/hbase/http/ServletConfig.java | 16 +++++++++++++--- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index 6bbbc5608a0d..d5af8df1c7fd 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -171,7 +171,9 @@ public class HttpServer implements FilterContainer { .put("jmx", new ServletConfig("jmx", "/jmx", "org.apache.hadoop.hbase.http.jmx.JMXJsonServlet")) .put("metrics", - new ServletConfig("metrics", "/metrics", "org.apache.hadoop.metrics.MetricsServlet")) + // MetricsServlet is deprecated in hadoop 2.8 and removed in 3.0. We shouldn't expect it, + // so pass false so that we don't create a noisy warn during instantiation. 
+ new ServletConfig("metrics", "/metrics", "org.apache.hadoop.metrics.MetricsServlet", false)) .put("prometheus", new ServletConfig("prometheus", "/prometheus", "org.apache.hadoop.hbase.http.prometheus.PrometheusHadoopServlet")) .build(); @@ -846,16 +848,19 @@ protected void addDefaultServlets(ContextHandlerCollection contexts, Configurati /* register metrics servlets */ String[] enabledServlets = conf.getStrings(METRIC_SERVLETS_CONF_KEY, METRICS_SERVLETS_DEFAULT); for (String enabledServlet : enabledServlets) { - try { - ServletConfig servletConfig = METRIC_SERVLETS.get(enabledServlet); - if (servletConfig != null) { + ServletConfig servletConfig = METRIC_SERVLETS.get(enabledServlet); + if (servletConfig != null) { + try { Class clz = Class.forName(servletConfig.getClazz()); addPrivilegedServlet(servletConfig.getName(), servletConfig.getPathSpec(), clz.asSubclass(HttpServlet.class)); + } catch (Exception e) { + if (servletConfig.isExpected()) { + // metrics are not critical to read/write, so an exception here shouldn't be fatal + // if the class was expected we should warn though + LOG.warn("Couldn't register the servlet " + enabledServlet, e); + } } - } catch (Exception e) { - /* shouldn't be fatal, so warn the user about it */ - LOG.warn("Couldn't register the servlet " + enabledServlet, e); } } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServletConfig.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServletConfig.java index befe60957605..366dbfd9f228 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServletConfig.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServletConfig.java @@ -23,14 +23,20 @@ @InterfaceAudience.Private class ServletConfig { - private String name; - private String pathSpec; - private String clazz; + private final String name; + private final String pathSpec; + private final String clazz; + private final boolean expected; public ServletConfig(String name, String pathSpec, String clazz) { + this(name, pathSpec, clazz, true); + } + + public ServletConfig(String name, String pathSpec, String clazz, boolean expected) { this.name = name; this.pathSpec = pathSpec; this.clazz = clazz; + this.expected = expected; } public String getName() { @@ -44,4 +50,8 @@ public String getPathSpec() { public String getClazz() { return clazz; } + + public boolean isExpected() { + return expected; + } } From 73cb0dddf8bcd0768e2e8eb7cf9d1bc3eddc1ea1 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Fri, 26 Jan 2024 10:03:42 +0000 Subject: [PATCH 228/514] HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is reached (#5615) Signed-off-by: Tak Lon (Stephen) Wu Signed-off-by: Peter Somogyi --- .../hadoop/hbase/io/hfile/CacheConfig.java | 28 ++++++++++++++ .../hbase/io/hfile/HFilePreadReader.java | 24 ++++++++---- .../hadoop/hbase/io/hfile/TestPrefetch.java | 38 +++++++++++++++++++ .../bucket/TestBucketCachePersister.java | 16 ++++---- 4 files changed, 92 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 4587eced6163..f89a6194cefb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -99,6 +99,12 @@ public class CacheConfig { public static final String BUCKETCACHE_PERSIST_INTERVAL_KEY = 
"hbase.bucketcache.persist.intervalinmillis"; + /** + * Configuration key to set the heap usage threshold limit once prefetch threads should be + * interrupted. + */ + public static final String PREFETCH_HEAP_USAGE_THRESHOLD = "hbase.rs.prefetchheapusage"; + // Defaults public static final boolean DEFAULT_CACHE_DATA_ON_READ = true; public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; @@ -111,6 +117,7 @@ public class CacheConfig { public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false; public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true; public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = Long.MAX_VALUE; + public static final double DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD = 1d; /** * Whether blocks should be cached on read (default is on if there is a cache but this can be @@ -157,6 +164,8 @@ public class CacheConfig { private final ByteBuffAllocator byteBuffAllocator; + private final double heapUsageThreshold; + /** * Create a cache configuration using the specified configuration object and defaults for family * level settings. Only use if no column family context. @@ -201,6 +210,8 @@ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); + this.heapUsageThreshold = + conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD); this.blockCache = blockCache; this.byteBuffAllocator = byteBuffAllocator; } @@ -222,6 +233,7 @@ public CacheConfig(CacheConfig cacheConf) { this.dropBehindCompaction = cacheConf.dropBehindCompaction; this.blockCache = cacheConf.blockCache; this.byteBuffAllocator = cacheConf.byteBuffAllocator; + this.heapUsageThreshold = cacheConf.heapUsageThreshold; } private CacheConfig() { @@ -237,6 +249,7 @@ private CacheConfig() { this.dropBehindCompaction = false; this.blockCache = null; this.byteBuffAllocator = ByteBuffAllocator.HEAP; + this.heapUsageThreshold = DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD; } /** @@ -386,6 +399,17 @@ public boolean shouldReadBlockFromCache(BlockType blockType) { return false; } + /** + * Checks if the current heap usage is below the threshold configured by + * "hbase.rs.prefetchheapusage" (0.8 by default). 
+ */ + public boolean isHeapUsageBelowThreshold() { + double total = Runtime.getRuntime().maxMemory(); + double available = Runtime.getRuntime().freeMemory(); + double usedRatio = 1d - (available / total); + return heapUsageThreshold > usedRatio; + } + /** * If we make sure the block could not be cached, we will not acquire the lock otherwise we will * acquire lock @@ -413,6 +437,10 @@ public ByteBuffAllocator getByteBuffAllocator() { return this.byteBuffAllocator; } + public double getHeapUsageThreshold() { + return heapUsageThreshold; + } + private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) { long cacheCompactedBlocksOnWriteThreshold = conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 92f6a8169f32..6063ffe68891 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -106,13 +106,23 @@ public void run() { HFileBlock block = prefetchStreamReader.readBlock(offset, onDiskSizeOfNextBlock, /* cacheBlock= */true, /* pread= */false, false, false, null, null, true); try { - if (!cacheConf.isInMemory() && !cache.blockFitsIntoTheCache(block).orElse(true)) { - LOG.warn( - "Interrupting prefetch for file {} because block {} of size {} " - + "doesn't fit in the available cache space.", - path, cacheKey, block.getOnDiskSizeWithHeader()); - interrupted = true; - break; + if (!cacheConf.isInMemory()) { + if (!cache.blockFitsIntoTheCache(block).orElse(true)) { + LOG.warn( + "Interrupting prefetch for file {} because block {} of size {} " + + "doesn't fit in the available cache space.", + path, cacheKey, block.getOnDiskSizeWithHeader()); + interrupted = true; + break; + } + if (!cacheConf.isHeapUsageBelowThreshold()) { + LOG.warn( + "Interrupting prefetch because heap usage is above the threshold: {} " + + "configured via {}", + cacheConf.getHeapUsageThreshold(), CacheConfig.PREFETCH_HEAP_USAGE_THRESHOLD); + interrupted = true; + break; + } } onDiskSizeOfNextBlock = block.getNextBlockOnDiskSize(); offset += block.getOnDiskSizeWithHeader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 0b45a930dceb..85b9199638c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -42,6 +42,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -156,6 +157,43 @@ public void testPrefetchBlockCacheDisabled() throws Exception { poolExecutor.getCompletedTaskCount() + poolExecutor.getQueue().size()); } + @Test + public void testPrefetchHeapUsageAboveThreshold() throws Exception { + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).setPrefetchBlocksOnOpen(true) + .setBlockCacheEnabled(true).build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); + Configuration newConf = new Configuration(conf); + 
newConf.setDouble(CacheConfig.PREFETCH_HEAP_USAGE_THRESHOLD, 0.1); + CacheConfig cacheConfig = + new CacheConfig(newConf, columnFamilyDescriptor, blockCache, ByteBuffAllocator.HEAP); + Path storeFile = writeStoreFile("testPrefetchHeapUsageAboveThreshold", meta, cacheConfig); + MutableInt cachedCount = new MutableInt(0); + MutableInt unCachedCount = new MutableInt(0); + readStoreFile(storeFile, (r, o) -> { + HFileBlock block = null; + try { + block = r.readBlock(o, -1, false, true, false, true, null, null); + } catch (IOException e) { + fail(e.getMessage()); + } + return block; + }, (key, block) -> { + boolean isCached = blockCache.getBlock(key, true, false, true) != null; + if ( + block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX + || block.getBlockType() == BlockType.INTERMEDIATE_INDEX + ) { + if (isCached) { + cachedCount.increment(); + } else { + unCachedCount.increment(); + } + } + }, cacheConfig); + assertTrue(unCachedCount.compareTo(cachedCount) > 0); + } + @Test public void testPrefetch() throws Exception { TraceUtil.trace(() -> { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index f6d3efa9015d..a39df7e14715 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -86,9 +86,10 @@ public Configuration setupBucketCacheConfig(long bucketCachePersistInterval) thr return conf; } - public BucketCache setupBucketCache(Configuration conf) throws IOException { - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, + public BucketCache setupBucketCache(Configuration conf, String persistentCacheFile) + throws IOException { + BucketCache bucketCache = new BucketCache("file:" + testDir + "/" + persistentCacheFile, + capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", 60 * 1000, conf); return bucketCache; } @@ -103,7 +104,7 @@ public void cleanupBucketCache(BucketCache bucketCache) throws IOException { public void testPrefetchPersistenceCrash() throws Exception { long bucketCachePersistInterval = 3000; Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval); - BucketCache bucketCache = setupBucketCache(conf); + BucketCache bucketCache = setupBucketCache(conf, "testPrefetchPersistenceCrash"); CacheConfig cacheConf = new CacheConfig(conf, bucketCache); FileSystem fs = HFileSystem.get(conf); // Load Cache @@ -121,7 +122,7 @@ public void testPrefetchPersistenceCrash() throws Exception { public void testPrefetchPersistenceCrashNegative() throws Exception { long bucketCachePersistInterval = Long.MAX_VALUE; Configuration conf = setupBucketCacheConfig(bucketCachePersistInterval); - BucketCache bucketCache = setupBucketCache(conf); + BucketCache bucketCache = setupBucketCache(conf, "testPrefetchPersistenceCrashNegative"); CacheConfig cacheConf = new CacheConfig(conf, bucketCache); FileSystem fs = HFileSystem.get(conf); // Load Cache @@ -134,7 +135,7 @@ public void testPrefetchPersistenceCrashNegative() throws Exception { @Test public void testPrefetchListUponBlockEviction() throws Exception { Configuration conf = setupBucketCacheConfig(200); - BucketCache 
bucketCache = setupBucketCache(conf); + BucketCache bucketCache = setupBucketCache(conf, "testPrefetchListUponBlockEviction"); CacheConfig cacheConf = new CacheConfig(conf, bucketCache); FileSystem fs = HFileSystem.get(conf); // Load Blocks in cache @@ -156,7 +157,8 @@ public void testPrefetchListUponBlockEviction() throws Exception { @Test public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception { Configuration conf = setupBucketCacheConfig(200); - BucketCache bucketCache = setupBucketCache(conf); + BucketCache bucketCache = + setupBucketCache(conf, "testPrefetchBlockEvictionWhilePrefetchRunning"); CacheConfig cacheConf = new CacheConfig(conf, bucketCache); FileSystem fs = HFileSystem.get(conf); // Load Blocks in cache From addb4e7fc3c0b6e486281033499b6de1f368d686 Mon Sep 17 00:00:00 2001 From: guluo Date: Fri, 26 Jan 2024 18:17:22 +0800 Subject: [PATCH 229/514] HBASE-28332 Type conversion is no need in method CompactionChecker.chore() (#5653) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/regionserver/HRegionServer.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index a77fa0cd879e..dfb8e2a204fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1684,14 +1684,13 @@ private static class CompactionChecker extends ScheduledChore { @Override protected void chore() { - for (Region r : this.instance.onlineRegions.values()) { + for (HRegion hr : this.instance.onlineRegions.values()) { // If region is read only or compaction is disabled at table level, there's no need to // iterate through region's stores - if (r == null || r.isReadOnly() || !r.getTableDescriptor().isCompactionEnabled()) { + if (hr == null || hr.isReadOnly() || !hr.getTableDescriptor().isCompactionEnabled()) { continue; } - HRegion hr = (HRegion) r; for (HStore s : hr.stores.values()) { try { long multiplier = s.getCompactionCheckMultiplier(); @@ -1719,7 +1718,7 @@ protected void chore() { } } } catch (IOException e) { - LOG.warn("Failed major compaction check on " + r, e); + LOG.warn("Failed major compaction check on " + hr, e); } } } From 9c79a7f267e974d36745ef5ec2adbbad871fd64c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 26 Jan 2024 18:53:39 +0800 Subject: [PATCH 230/514] HBASE-28331 Client integration test fails after upgrading hadoop3 version to 3.3.x (#5652) Signed-off-by: Xin Sun --- dev-support/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 7d2ef32df6eb..9c03d97a3c44 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -152,7 +152,7 @@ pipeline { } stage ('hadoop 3 cache') { environment { - HADOOP3_VERSION="3.1.1" + HADOOP3_VERSION="3.3.5" } steps { // directory must be unique for each parallel stage, because jenkins runs them in the same workspace :( From 11458ec57a6f756510ea6f6aa316290ccb88b694 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 26 Jan 2024 21:36:29 +0800 Subject: [PATCH 231/514] HBASE-28333 Refactor TestClientTimeouts to make it more clear that what we want to test (#5655) Signed-off-by: Xin Sun --- .../hbase/client/TestClientTimeouts.java | 67 +++++++------------ 1 file changed, 26 insertions(+), 41 deletions(-) diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index d358695c5f9b..9a92f4b1aa55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.IOException; import java.net.SocketAddress; import java.net.SocketTimeoutException; import java.util.Map; @@ -31,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; import org.apache.hadoop.hbase.ipc.AbstractRpcClient; import org.apache.hadoop.hbase.ipc.BlockingRpcClient; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ -67,9 +67,6 @@ public class TestClientTimeouts { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(SLAVES); - // Set the custom RPC client with random timeouts as the client - TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, - RandomTimeoutRpcClient.class.getName()); } @AfterClass @@ -77,51 +74,39 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + private Connection createConnection() { + // Ensure the HBaseAdmin uses a new connection by changing Configuration. + Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); + // Set the custom RPC client with random timeouts as the client + conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, + RandomTimeoutRpcClient.class.getName()); + conf.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1)); + for (;;) { + try { + return ConnectionFactory.createConnection(conf); + } catch (IOException e) { + // since we randomly throw SocketTimeoutException, it is possible that we fail when creating + // the Connection, but this is not what we want to test here, so just ignore it and try + // again + } + } + } + /** * Test that a client that fails an RPC to the master retries properly and doesn't throw any * unexpected exceptions. */ @Test public void testAdminTimeout() throws Exception { - boolean lastFailed = false; - int initialInvocations = invokations.get(); - RandomTimeoutRpcClient rpcClient = (RandomTimeoutRpcClient) RpcClientFactory - .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); - - try { - for (int i = 0; i < 5 || (lastFailed && i < 100); ++i) { - lastFailed = false; - // Ensure the HBaseAdmin uses a new connection by changing Configuration. - Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); - conf.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1)); - Admin admin = null; - Connection connection = null; - try { - connection = ConnectionFactory.createConnection(conf); - admin = connection.getAdmin(); - admin.balancerSwitch(false, false); - } catch (MasterRegistryFetchException ex) { - // Since we are randomly throwing SocketTimeoutExceptions, it is possible to get - // a MasterRegistryFetchException. It's a bug if we get other exceptions. 
- lastFailed = true; - } finally { - if (admin != null) { - admin.close(); - if (admin.getConnection().isClosed()) { - rpcClient = (RandomTimeoutRpcClient) RpcClientFactory - .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); - } - } - if (connection != null) { - connection.close(); - } - } + try (Connection conn = createConnection(); Admin admin = conn.getAdmin()) { + int initialInvocations = invokations.get(); + boolean balanceEnabled = admin.isBalancerEnabled(); + for (int i = 0; i < 5; i++) { + assertEquals(balanceEnabled, admin.balancerSwitch(!balanceEnabled, false)); + balanceEnabled = !balanceEnabled; } // Ensure the RandomTimeoutRpcEngine is actually being used. - assertFalse(lastFailed); assertTrue(invokations.get() > initialInvocations); - } finally { - rpcClient.close(); } } From d82a892fb8d50d348a0d1e80b3d5e147d195f3e6 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 26 Jan 2024 23:50:03 +0800 Subject: [PATCH 232/514] HBASE-28325 Enable infra automation to add label on a Jira when a new PR is posted (#5656) Signed-off-by: Bryan Beaudreault --- .asf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index a991e8528d78..d1abb7131437 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -38,4 +38,4 @@ notifications: commits: commits@hbase.apache.org issues: issues@hbase.apache.org pullrequests: issues@hbase.apache.org - jira_options: link + jira_options: link label From 3b2e98192091368dc961eaef8e25c679f99aa8fd Mon Sep 17 00:00:00 2001 From: Rahul Kumar Date: Sun, 28 Jan 2024 00:51:49 +0530 Subject: [PATCH 233/514] HBASE-28151 Option to allow/disallow bypassing pre transit check for assing/unassign (#5493) Signed-off-by: Andrew Purtell Signed-off-by: Viraj Jasani Signed-off-by: Ravi Kishore Valeti --- .../apache/hadoop/hbase/client/HBaseHbck.java | 9 ++-- .../org/apache/hadoop/hbase/client/Hbck.java | 43 +++++++++++++------ .../shaded/protobuf/RequestConverter.java | 8 ++-- .../main/protobuf/server/master/Master.proto | 2 + .../hbase/master/MasterRpcServices.java | 9 ++-- .../master/assignment/AssignmentManager.java | 22 +++++++--- .../procedure/TruncateRegionProcedure.java | 4 +- .../apache/hadoop/hbase/client/TestHbck.java | 19 +++++++- .../master/assignment/TestRegionBypass.java | 2 +- 9 files changed, 81 insertions(+), 37 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index 8df0504b2a9a..83b53ccba3c3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -135,10 +135,11 @@ public Map setRegionStateInMeta( } @Override - public List assigns(List encodedRegionNames, boolean override) throws IOException { + public List assigns(List encodedRegionNames, boolean override, boolean force) + throws IOException { try { AssignsResponse response = this.hbck.assigns(rpcControllerFactory.newController(), - RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toAssignRegionsRequest(encodedRegionNames, override, force)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -147,11 +148,11 @@ public List assigns(List encodedRegionNames, boolean override) thr } @Override - public List unassigns(List encodedRegionNames, boolean override) + public List unassigns(List encodedRegionNames, boolean 
override, boolean force) throws IOException { try { UnassignsResponse response = this.hbck.unassigns(rpcControllerFactory.newController(), - RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override, force)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index b5ba25058838..6baa876f9387 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -62,19 +62,26 @@ public interface Hbck extends Abortable, Closeable { * good if many Regions to online -- and it will schedule the assigns even in the case where * Master is initializing (as long as the ProcedureExecutor is up). Does NOT call Coprocessor * hooks. - * @param override You need to add the override for case where a region has previously - * been bypassed. When a Procedure has been bypassed, a Procedure will - * have completed but no other Procedure will be able to make progress - * on the target entity (intentionally). This override flag will - * override this fencing mechanism. + * @param override You need to add override for unset of the procedure from + * RegionStateNode without byPassing preTransitCheck + * @param force You need to add force for case where a region has previously been + * bypassed. When a Procedure has been bypassed, a Procedure will have + * completed but no other Procedure will be able to make progress on the + * target entity (intentionally). Skips preTransitCheck only when + * selected along with override option * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example * of what a random user-space encoded Region name looks like. */ - List assigns(List encodedRegionNames, boolean override) throws IOException; + List assigns(List encodedRegionNames, boolean override, boolean force) + throws IOException; + + default List assigns(List encodedRegionNames, boolean override) throws IOException { + return assigns(encodedRegionNames, override, true); + } default List assigns(List encodedRegionNames) throws IOException { - return assigns(encodedRegionNames, false); + return assigns(encodedRegionNames, false, false); } /** @@ -82,19 +89,27 @@ default List assigns(List encodedRegionNames) throws IOException { * at a time -- good if many Regions to offline -- and it will schedule the assigns even in the * case where Master is initializing (as long as the ProcedureExecutor is up). Does NOT call * Coprocessor hooks. - * @param override You need to add the override for case where a region has previously - * been bypassed. When a Procedure has been bypassed, a Procedure will - * have completed but no other Procedure will be able to make progress - * on the target entity (intentionally). This override flag will - * override this fencing mechanism. + * @param override You need to add override for unset of the procedure from + * RegionStateNode without byPassing preTransitCheck + * @param force You need to add force for case where a region has previously been + * bypassed. 
When a Procedure has been bypassed, a Procedure will have + * completed but no other Procedure will be able to make progress on the + * target entity (intentionally). Skips preTransitCheck only when + * selected along with override option * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example * of what a random user-space encoded Region name looks like. */ - List unassigns(List encodedRegionNames, boolean override) throws IOException; + List unassigns(List encodedRegionNames, boolean override, boolean force) + throws IOException; + + default List unassigns(List encodedRegionNames, boolean override) + throws IOException { + return unassigns(encodedRegionNames, override, true); + } default List unassigns(List encodedRegionNames) throws IOException { - return unassigns(encodedRegionNames, false); + return unassigns(encodedRegionNames, false, true); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 377b46494633..ce12aaea0d24 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1590,17 +1590,17 @@ private static List toProtoServerNames(List // HBCK2 public static MasterProtos.AssignsRequest toAssignRegionsRequest(List encodedRegionNames, - boolean override) { + boolean override, boolean force) { MasterProtos.AssignsRequest.Builder b = MasterProtos.AssignsRequest.newBuilder(); return b.addAllRegion(toEncodedRegionNameRegionSpecifiers(encodedRegionNames)) - .setOverride(override).build(); + .setOverride(override).setForce(force).build(); } public static MasterProtos.UnassignsRequest - toUnassignRegionsRequest(List encodedRegionNames, boolean override) { + toUnassignRegionsRequest(List encodedRegionNames, boolean override, boolean force) { MasterProtos.UnassignsRequest.Builder b = MasterProtos.UnassignsRequest.newBuilder(); return b.addAllRegion(toEncodedRegionNameRegionSpecifiers(encodedRegionNames)) - .setOverride(override).build(); + .setOverride(override).setForce(force).build(); } public static MasterProtos.ScheduleServerCrashProcedureRequest diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index b1e750f4d920..a8adaa27453f 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -1302,6 +1302,7 @@ message SetRegionStateInMetaResponse { message AssignsRequest { repeated RegionSpecifier region = 1; optional bool override = 2 [default = false]; + optional bool force = 3 [default = false]; } /** Like Admin's AssignRegionResponse except it can @@ -1317,6 +1318,7 @@ message AssignsResponse { message UnassignsRequest { repeated RegionSpecifier region = 1; optional bool override = 2 [default = false]; + optional bool force= 3 [default = false]; } /** Like Admin's UnassignRegionResponse except it can diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 6d330d6eb791..1da8e03d179e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java 
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2714,6 +2714,7 @@ public MasterProtos.AssignsResponse assigns(RpcController controller, MasterProtos.AssignsResponse.Builder responseBuilder = MasterProtos.AssignsResponse.newBuilder(); final boolean override = request.getOverride(); + final boolean force = request.getForce(); LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override); for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) { final RegionInfo info = getRegionInfo(rs); @@ -2721,7 +2722,7 @@ public MasterProtos.AssignsResponse assigns(RpcController controller, LOG.info("Unknown region {}", rs); continue; } - responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override)) + responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override, force)) .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); } return responseBuilder.build(); @@ -2741,6 +2742,7 @@ public MasterProtos.UnassignsResponse unassigns(RpcController controller, MasterProtos.UnassignsResponse.Builder responseBuilder = MasterProtos.UnassignsResponse.newBuilder(); final boolean override = request.getOverride(); + final boolean force = request.getForce(); LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override); for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) { final RegionInfo info = getRegionInfo(rs); @@ -2748,8 +2750,9 @@ public MasterProtos.UnassignsResponse unassigns(RpcController controller, LOG.info("Unknown region {}", rs); continue; } - responseBuilder.addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override)) - .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); + responseBuilder + .addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override, force)) + .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); } return responseBuilder.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 49cf29ee61d9..9cee9f87ce2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -765,11 +765,14 @@ private void preTransitCheck(RegionStateNode regionNode, RegionState.State[] exp * @param override If false, check RegionState is appropriate for assign; if not throw exception. */ private TransitRegionStateProcedure createAssignProcedure(RegionInfo regionInfo, ServerName sn, - boolean override) throws IOException { + boolean override, boolean force) throws IOException { RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo); regionNode.lock(); try { if (override) { + if (!force) { + preTransitCheck(regionNode, STATES_EXPECTED_ON_ASSIGN); + } if (regionNode.getProcedure() != null) { regionNode.unsetProcedure(regionNode.getProcedure()); } @@ -787,7 +790,7 @@ private TransitRegionStateProcedure createAssignProcedure(RegionInfo regionInfo, /** * Create an assign TransitRegionStateProcedure. Does NO checking of RegionState. Presumes * appriopriate state ripe for assign. 
- * @see #createAssignProcedure(RegionInfo, ServerName, boolean) + * @see #createAssignProcedure(RegionInfo, ServerName, boolean, boolean) */ private TransitRegionStateProcedure createAssignProcedure(RegionStateNode regionNode, ServerName targetServer) { @@ -801,7 +804,7 @@ private TransitRegionStateProcedure createAssignProcedure(RegionStateNode region } public long assign(RegionInfo regionInfo, ServerName sn) throws IOException { - TransitRegionStateProcedure proc = createAssignProcedure(regionInfo, sn, false); + TransitRegionStateProcedure proc = createAssignProcedure(regionInfo, sn, false, false); ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc); return proc.getProcId(); } @@ -817,7 +820,7 @@ public long assign(RegionInfo regionInfo) throws IOException { */ public Future assignAsync(RegionInfo regionInfo, ServerName sn) throws IOException { return ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), - createAssignProcedure(regionInfo, sn, false)); + createAssignProcedure(regionInfo, sn, false, false)); } /** @@ -961,10 +964,11 @@ static int compare(TransitRegionStateProcedure left, TransitRegionStateProcedure * method is called from HBCK2. * @return an assign or null */ - public TransitRegionStateProcedure createOneAssignProcedure(RegionInfo ri, boolean override) { + public TransitRegionStateProcedure createOneAssignProcedure(RegionInfo ri, boolean override, + boolean force) { TransitRegionStateProcedure trsp = null; try { - trsp = createAssignProcedure(ri, null, override); + trsp = createAssignProcedure(ri, null, override, force); } catch (IOException ioe) { LOG.info( "Failed {} assign, override={}" @@ -978,12 +982,16 @@ public TransitRegionStateProcedure createOneAssignProcedure(RegionInfo ri, boole * Create one TransitRegionStateProcedure to unassign a region. This method is called from HBCK2. 
* @return an unassign or null */ - public TransitRegionStateProcedure createOneUnassignProcedure(RegionInfo ri, boolean override) { + public TransitRegionStateProcedure createOneUnassignProcedure(RegionInfo ri, boolean override, + boolean force) { RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(ri); TransitRegionStateProcedure trsp = null; regionNode.lock(); try { if (override) { + if (!force) { + preTransitCheck(regionNode, STATES_EXPECTED_ON_UNASSIGN_OR_MOVE); + } if (regionNode.getProcedure() != null) { regionNode.unsetProcedure(regionNode.getProcedure()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index 9730391baf22..83722d6c1dca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -210,10 +210,10 @@ public TableOperationType getTableOperationType() { private TransitRegionStateProcedure createUnAssignProcedures(MasterProcedureEnv env) throws IOException { - return env.getAssignmentManager().createOneUnassignProcedure(getRegion(), true); + return env.getAssignmentManager().createOneUnassignProcedure(getRegion(), true, true); } private TransitRegionStateProcedure createAssignProcedures(MasterProcedureEnv env) { - return env.getAssignmentManager().createOneAssignProcedure(getRegion(), true); + return env.getAssignmentManager().createOneAssignProcedure(getRegion(), true, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java index 406b25fed4e0..360641e64b7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java @@ -250,11 +250,20 @@ public void testAssigns() throws Exception { for (long pid : pids) { assertEquals(Procedure.NO_PROC_ID, pid); } - // If we pass override, then we should be able to unassign EVEN THOUGH Regions already + // Rerun the unassign with override. Should fail for all Regions since they already + // unassigned; failed + // unassign will manifest as all pids being -1 (ever since HBASE-24885). + pids = hbck.unassigns( + regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()), true, false); + waitOnPids(pids); + for (long pid : pids) { + assertEquals(Procedure.NO_PROC_ID, pid); + } + // If we pass force, then we should be able to unassign EVEN THOUGH Regions already // unassigned.... makes for a mess but operator might want to do this at an extreme when // doing fixup of broke cluster. pids = hbck.unassigns( - regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()), true); + regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()), true, true); waitOnPids(pids); for (long pid : pids) { assertNotEquals(Procedure.NO_PROC_ID, pid); @@ -283,6 +292,12 @@ public void testAssigns() throws Exception { LOG.info("RS: {}", rs.toString()); assertTrue(rs.toString(), rs.isOpened()); } + // Rerun the assign with override. 
Should fail for all Regions since they already assigned + pids = hbck.assigns( + regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()), true, false); + for (long pid : pids) { + assertEquals(Procedure.NO_PROC_ID, pid); + } // What happens if crappy region list passed? pids = hbck.assigns( Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java index 61520873240c..8295da82f49c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java @@ -125,7 +125,7 @@ public void testBypass() throws IOException, InterruptedException { .getMasterProcedureExecutor().getActiveProcIds().isEmpty()); // Now assign with the override flag. for (RegionInfo ri : regions) { - TEST_UTIL.getHbck().assigns(Arrays. asList(ri.getEncodedName()), true); + TEST_UTIL.getHbck().assigns(Arrays. asList(ri.getEncodedName()), true, true); } TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getMaster() .getMasterProcedureExecutor().getActiveProcIds().isEmpty()); From 97574a265b31a4b8ecdbad1709c0226cb3212825 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Mon, 29 Jan 2024 08:04:06 -0500 Subject: [PATCH 234/514] HBASE-28317 Expose client TLS certificate on RpcCallContext (#5644) Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/ipc/NettyRpcServer.java | 31 +++++++++++++++++-- .../hadoop/hbase/ipc/RpcCallContext.java | 8 +++++ .../apache/hadoop/hbase/ipc/ServerCall.java | 9 ++++++ .../hadoop/hbase/ipc/ServerRpcConnection.java | 2 ++ .../namequeues/TestNamedQueueRecorder.java | 6 ++++ .../hbase/namequeues/TestRpcLogDetails.java | 6 ++++ .../region/TestRegionProcedureStore.java | 6 ++++ 7 files changed, 65 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index ceff84a90e11..c291338e40c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -27,9 +27,12 @@ import java.io.InterruptedIOException; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLPeerUnverifiedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HBaseServerBase; @@ -166,10 +169,10 @@ protected void initChannel(Channel ch) throws Exception { ChannelPipeline pipeline = ch.pipeline(); FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); preambleDecoder.setSingleDecode(true); + NettyServerRpcConnection conn = createNettyServerRpcConnection(ch); if (conf.getBoolean(HBASE_SERVER_NETTY_TLS_ENABLED, false)) { - initSSL(pipeline, conf.getBoolean(HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, true)); + initSSL(pipeline, conn, conf.getBoolean(HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, true)); } - NettyServerRpcConnection conn = 
createNettyServerRpcConnection(ch); pipeline.addLast(NettyRpcServerPreambleHandler.DECODER_NAME, preambleDecoder) .addLast(new NettyRpcServerPreambleHandler(NettyRpcServer.this, conn)) // We need NettyRpcServerResponseEncoder here because NettyRpcServerPreambleHandler may @@ -378,7 +381,7 @@ public int getNumOpenConnections() { return allChannels.size(); } - private void initSSL(ChannelPipeline p, boolean supportPlaintext) + private void initSSL(ChannelPipeline p, NettyServerRpcConnection conn, boolean supportPlaintext) throws X509Exception, IOException { SslContext nettySslContext = getSslContext(); @@ -413,6 +416,28 @@ private void initSSL(ChannelPipeline p, boolean supportPlaintext) sslHandler.setWrapDataSize( conf.getInt(HBASE_SERVER_NETTY_TLS_WRAP_SIZE, DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE)); + sslHandler.handshakeFuture().addListener(future -> { + try { + Certificate[] certificates = sslHandler.engine().getSession().getPeerCertificates(); + if (certificates != null && certificates.length > 0) { + conn.clientCertificateChain = (X509Certificate[]) certificates; + } else if (sslHandler.engine().getNeedClientAuth()) { + LOG.error( + "Could not get peer certificate on TLS connection from {}, although one is required", + remoteAddress); + } + } catch (SSLPeerUnverifiedException e) { + if (sslHandler.engine().getNeedClientAuth()) { + LOG.error( + "Could not get peer certificate on TLS connection from {}, although one is required", + remoteAddress, e); + } + } catch (Exception e) { + LOG.error("Unexpected error getting peer certificate for TLS connection from {}", + remoteAddress, e); + } + }); + p.addLast("ssl", sslHandler); LOG.debug("SSL handler added for channel: {}", p.channel()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 4f299b4a85d2..43432324579b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetAddress; +import java.security.cert.X509Certificate; import java.util.Optional; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -60,6 +61,13 @@ default Optional getRequestUserName() { return getRequestUser().map(User::getShortName); } + /** + * Returns the TLS certificate(s) that the client presented to this HBase server when making its + * connection. TLS is orthogonal to Kerberos, so this is unrelated to + * {@link RpcCallContext#getRequestUser()}. Both, one, or neither may be present. 
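As a usage illustration, not part of the patch, server-side code that runs inside a call (for example a coprocessor endpoint) could read the chain through the existing RpcServer.getCurrentCall() accessor; the helper class name and the fallback string below are hypothetical.

import java.util.Optional;
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;

public final class ClientCertDescriber {
  private ClientCertDescriber() {
  }

  /** Returns the subject DN of the caller's leaf certificate, or a fallback string. */
  public static String describeCaller() {
    Optional<RpcCall> call = RpcServer.getCurrentCall();
    return call.flatMap(RpcCall::getClientCertificateChain)
      .filter(chain -> chain.length > 0)
      .map(chain -> chain[0].getSubjectX500Principal().getName())
      .orElse("no TLS client certificate");
  }
}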
+ */ + Optional getClientCertificateChain(); + /** Returns Address of remote client in this call */ InetAddress getRemoteAddress(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index a2c578fd6664..25d153c068aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; import java.nio.ByteBuffer; +import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -95,6 +96,7 @@ public abstract class ServerCall implements RpcCa protected final User user; protected final InetAddress remoteAddress; + protected final X509Certificate[] clientCertificateChain; protected RpcCallback rpcCallback; private long responseCellSize = 0; @@ -135,9 +137,11 @@ public abstract class ServerCall implements RpcCa if (connection != null) { this.user = connection.user; this.retryImmediatelySupported = connection.retryImmediatelySupported; + this.clientCertificateChain = connection.clientCertificateChain; } else { this.user = null; this.retryImmediatelySupported = false; + this.clientCertificateChain = null; } this.remoteAddress = remoteAddress; this.timeout = timeout; @@ -499,6 +503,11 @@ public Optional getRequestUser() { return Optional.ofNullable(user); } + @Override + public Optional getClientCertificateChain() { + return Optional.ofNullable(clientCertificateChain); + } + @Override public InetAddress getRemoteAddress() { return remoteAddress; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 695f1e7050c4..4c32b2b6a5fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -31,6 +31,7 @@ import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; +import java.security.cert.X509Certificate; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -133,6 +134,7 @@ abstract class ServerRpcConnection implements Closeable { protected User user = null; protected UserGroupInformation ugi = null; protected SaslServerAuthenticationProviders saslProviders = null; + protected X509Certificate[] clientCertificateChain = null; public ServerRpcConnection(RpcServer rpcServer) { this.rpcServer = rpcServer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 00953353187e..124214e46af3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -22,6 +22,7 @@ import java.net.InetAddress; import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; +import java.security.cert.X509Certificate; import java.util.Collections; import java.util.List; import java.util.Map; @@ -814,6 +815,11 @@ public Optional getRequestUser() { return getUser(userName); } + @Override + public Optional getClientCertificateChain() { + return Optional.empty(); + } + @Override 
public InetAddress getRemoteAddress() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java index 67d8a2579097..1de0a0d31a33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetAddress; import java.nio.ByteBuffer; +import java.security.cert.X509Certificate; import java.util.Arrays; import java.util.Collections; import java.util.Map; @@ -213,6 +214,11 @@ public Optional getRequestUser() { return null; } + @Override + public Optional getClientCertificateChain() { + return Optional.empty(); + } + @Override public InetAddress getRemoteAddress() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index d069c2560a55..fdd5c7d5cf90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; +import java.security.cert.X509Certificate; import java.util.HashSet; import java.util.Map; import java.util.Optional; @@ -275,6 +276,11 @@ public Optional getRequestUser() { return Optional.empty(); } + @Override + public Optional getClientCertificateChain() { + return Optional.empty(); + } + @Override public InetAddress getRemoteAddress() { return null; From f3db223587685388f655dfe83d2f8cc745ea9443 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 29 Jan 2024 23:52:48 +0800 Subject: [PATCH 235/514] HBASE-28331 Client integration test fails after upgrading hadoop3 version to 3.3.x (#5657) Signed-off-by: Yi Mei --- dev-support/Jenkinsfile | 2 ++ dev-support/patch-hadoop3.sh | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100755 dev-support/patch-hadoop3.sh diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 9c03d97a3c44..5ff411ee473c 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -726,6 +726,8 @@ pipeline { echo "Attempting to use run an instance on top of Hadoop 3." artifact=$(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | head -n 1) tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3" + # we need to patch some files otherwise minicluster will fail to start, see MAPREDUCE-7471 + ${BASEDIR}/dev-support/patch-hadoop3.sh hadoop-3 if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \ --single-process \ --working-dir output-integration/hadoop-3 \ diff --git a/dev-support/patch-hadoop3.sh b/dev-support/patch-hadoop3.sh new file mode 100755 index 000000000000..fed5a535bcdd --- /dev/null +++ b/dev-support/patch-hadoop3.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +hadoop_dir=$1 + +sed -i "s/HADOOP_TOOLS_DIR=\${HADOOP_TOOLS_DIR:-\"share\/hadoop\/tools\"}/HADOOP_TOOLS_DIR=\${HADOOP_TOOLS_DIR:-\"\$HADOOP_TOOLS_HOME\/share\/hadoop\/tools\"}/g" "$hadoop_dir/libexec/hadoop-functions.sh" +sed -i "/hadoop_add_classpath \"\${junitjar}\"/a mockitojar=\$(echo \"\${HADOOP_TOOLS_LIB_JARS_DIR}\"\/mockito-core-[0-9]*.jar)\nhadoop_add_classpath \"\${mockitojar}\"" "$hadoop_dir/bin/mapred" +curl https://repo1.maven.org/maven2/org/mockito/mockito-core/2.28.2/mockito-core-2.28.2.jar -o "$hadoop_dir/share/hadoop/tools/lib/mockito-core-2.28.2.jar" From f34e214ffc9d6f70ead8d8730e3d3199523ed419 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 30 Jan 2024 11:23:07 +0800 Subject: [PATCH 236/514] HBASE-28331 Addendum remove stale hadoop tarballs in client integration test --- dev-support/Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 5ff411ee473c..a767775b36b4 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -661,6 +661,9 @@ pipeline { rm -rf "hadoop-3" && mkdir "hadoop-3" rm -rf ".m2-for-repo" && mkdir ".m2-for-repo" rm -rf ".m2-for-src" && mkdir ".m2-for-src" + # remove old hadoop tarballs in workspace + rm -rf hadoop-2*.tar.gz + rm -rf hadoop-3*.tar.gz echo "(x) {color:red}-1 source release artifact{color}\n-- Something went wrong with this stage, [check relevant console output|${BUILD_URL}/console]." >output-srctarball/commentfile echo "(x) {color:red}-1 client integration test{color}\n-- Something went wrong with this stage, [check relevant console output|${BUILD_URL}/console]." 
>output-integration/commentfile ''' From c4e332a93b563d6a8a832fd46b552f68a3e76490 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Tue, 30 Jan 2024 14:20:34 -0500 Subject: [PATCH 237/514] HBASE-28336 Correctly cast array of Certificates to array of X509Certificates (#5658) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/ipc/NettyRpcServer.java | 50 ++++++++------- .../hadoop/hbase/ipc/TestNettyRpcServer.java | 63 +++++++++++++++++++ 2 files changed, 92 insertions(+), 21 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index c291338e40c2..629b3468cbe5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -416,33 +416,41 @@ private void initSSL(ChannelPipeline p, NettyServerRpcConnection conn, boolean s sslHandler.setWrapDataSize( conf.getInt(HBASE_SERVER_NETTY_TLS_WRAP_SIZE, DEFAULT_HBASE_SERVER_NETTY_TLS_WRAP_SIZE)); - sslHandler.handshakeFuture().addListener(future -> { - try { - Certificate[] certificates = sslHandler.engine().getSession().getPeerCertificates(); - if (certificates != null && certificates.length > 0) { - conn.clientCertificateChain = (X509Certificate[]) certificates; - } else if (sslHandler.engine().getNeedClientAuth()) { - LOG.error( - "Could not get peer certificate on TLS connection from {}, although one is required", - remoteAddress); - } - } catch (SSLPeerUnverifiedException e) { - if (sslHandler.engine().getNeedClientAuth()) { - LOG.error( - "Could not get peer certificate on TLS connection from {}, although one is required", - remoteAddress, e); - } - } catch (Exception e) { - LOG.error("Unexpected error getting peer certificate for TLS connection from {}", - remoteAddress, e); - } - }); + sslHandler.handshakeFuture() + .addListener(future -> sslHandshakeCompleteHandler(conn, sslHandler, remoteAddress)); p.addLast("ssl", sslHandler); LOG.debug("SSL handler added for channel: {}", p.channel()); } } + static void sslHandshakeCompleteHandler(NettyServerRpcConnection conn, SslHandler sslHandler, + SocketAddress remoteAddress) { + try { + Certificate[] certificates = sslHandler.engine().getSession().getPeerCertificates(); + if (certificates != null && certificates.length > 0) { + X509Certificate[] x509Certificates = new X509Certificate[certificates.length]; + for (int i = 0; i < x509Certificates.length; i++) { + x509Certificates[i] = (X509Certificate) certificates[i]; + } + conn.clientCertificateChain = x509Certificates; + } else if (sslHandler.engine().getNeedClientAuth()) { + LOG.debug( + "Could not get peer certificate on TLS connection from {}, although one is required", + remoteAddress); + } + } catch (SSLPeerUnverifiedException e) { + if (sslHandler.engine().getNeedClientAuth()) { + LOG.debug( + "Could not get peer certificate on TLS connection from {}, although one is required", + remoteAddress, e); + } + } catch (Exception e) { + LOG.debug("Unexpected error getting peer certificate for TLS connection from {}", + remoteAddress, e); + } + } + SslContext getSslContext() throws X509Exception, IOException { SslContext result = sslContextForServer.get(); if (result == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java index 6af67e2190d3..2d5b95028f6f 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java @@ -17,11 +17,25 @@ */ package org.apache.hadoop.hbase.ipc; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; import java.util.Arrays; import java.util.Collection; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -47,6 +61,8 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslHandler; + @Category({ RPCTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestNettyRpcServer { @@ -122,4 +138,51 @@ protected void doTest(TableName tableName) throws Exception { } } + private static final String CERTIFICATE = "-----BEGIN CERTIFICATE-----\n" + + "MIIEITCCAwmgAwIBAgIUaLL8vLOhWLCLXVHEJqXJhfmsTB8wDQYJKoZIhvcNAQEL\n" + + "BQAwgawxCzAJBgNVBAYTAlVTMRYwFAYDVQQIDA1NYXNzYWNodXNldHRzMRIwEAYD\n" + + "VQQHDAlDYW1icmlkZ2UxGDAWBgNVBAoMD25ldHR5IHRlc3QgY2FzZTEYMBYGA1UE\n" + + "CwwPbmV0dHkgdGVzdCBjYXNlMRgwFgYDVQQDDA9uZXR0eSB0ZXN0IGNhc2UxIzAh\n" + + "BgkqhkiG9w0BCQEWFGNjb25uZWxsQGh1YnNwb3QuY29tMB4XDTI0MDEyMTE5MzMy\n" + + "MFoXDTI1MDEyMDE5MzMyMFowgawxCzAJBgNVBAYTAlVTMRYwFAYDVQQIDA1NYXNz\n" + + "YWNodXNldHRzMRIwEAYDVQQHDAlDYW1icmlkZ2UxGDAWBgNVBAoMD25ldHR5IHRl\n" + + "c3QgY2FzZTEYMBYGA1UECwwPbmV0dHkgdGVzdCBjYXNlMRgwFgYDVQQDDA9uZXR0\n" + + "eSB0ZXN0IGNhc2UxIzAhBgkqhkiG9w0BCQEWFGNjb25uZWxsQGh1YnNwb3QuY29t\n" + + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy+qzEZpQMjVdLj0siUcG\n" + + "y8LIHOW4S+tgHIKFkF865qWq6FVGbROe2Z0f5W6yIamZkdxzptT0iv+8S5okNNeW\n" + + "2NbsN/HNJIRtWfxku1Jh1gBqSkAYIjXyq7+20hIaJTzzxqike9M/Lc14EGb33Ja/\n" + + "kDPRV3UtiM3Ntf3eALXKbrWptkbgQngCaTgtfg8IkMAEpP270wZ9fW0lDHv3NPPt\n" + + "Zt0QSJzWSqWfu+l4ayvcUQYyNJesx9YmTHSJu69lvT4QApoX8FEiHfNCJ28R50CS\n" + + "aIgOpCWUvkH7rqx0p9q393uJRS/S6RlLbU30xUN1fNrVmP/XAapfy+R0PSgiUi8o\n" + + "EQIDAQABozkwNzAWBgNVHRIEDzANggt3d3cuZm9vLmNvbTAdBgNVHQ4EFgQUl4FD\n" + + "Y8jJ/JHJR68YqPsGUjUJuwgwDQYJKoZIhvcNAQELBQADggEBADVzivYz2M0qsWUc\n" + + "jXjCHymwTIr+7ud10um53FbYEAfKWsIY8Pp35fKpFzUwc5wVdCnLU86K/YMKRzNB\n" + + "zL2Auow3PJFRvXecOv7dWxNlNneLDcwbVrdNRu6nQXmZUgyz0oUKuJbF+JGtI+7W\n" + + "kRw7yhBfki+UCSQWeDqvaWzgmA4Us0N8NFq3euAs4xFbMMPMQWrT9Z7DGchCeRiB\n" + + "dkQBvh88vbR3v2Saq14W4Wt5rj2++vXWGQSeAQL6nGbOwc3ohW6isNNV0eGQQTmS\n" + + "khS2d/JDZq2XL5RGexf3CA6YYzWiTr9YZHNjuobvLH7mVnA2c8n6Zty/UhfnuK1x\n" + "JbkleFk=\n" + + "-----END CERTIFICATE-----"; + + @Test + public void testHandshakeCompleteHandler() + throws SSLPeerUnverifiedException, CertificateException { + NettyServerRpcConnection conn = mock(NettyServerRpcConnection.class); + SslHandler sslHandler = mock(SslHandler.class); + SocketAddress remoteAddress = new InetSocketAddress("localhost", 5555); + SSLEngine engine = mock(SSLEngine.class); 
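To spell out why the handler above copies the chain element by element rather than casting the whole array (an editorial aside, not patch content): a Java array keeps the component type it was created with, so a Certificate[] cannot be cast to X509Certificate[] even when every element is an X509Certificate, and the blanket cast from the previous commit could throw ClassCastException depending on the runtime array type returned by getPeerCertificates(). A self-contained illustration:

import java.security.cert.Certificate;
import java.security.cert.X509Certificate;

public class ArrayCastDemo {
  public static void main(String[] args) {
    X509Certificate x509 = null; // stands in for a real parsed certificate
    Certificate[] certs = new Certificate[] { x509 }; // runtime type is Certificate[]
    // X509Certificate[] broken = (X509Certificate[]) certs; // would throw ClassCastException
    X509Certificate[] copied = new X509Certificate[certs.length];
    for (int i = 0; i < certs.length; i++) {
      copied[i] = (X509Certificate) certs[i]; // per-element cast is legal
    }
    System.out.println(copied.length);
  }
}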
+ SSLSession session = mock(SSLSession.class); + CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509"); + X509Certificate x509Certificate = (X509Certificate) certificateFactory + .generateCertificate(new ByteArrayInputStream(CERTIFICATE.getBytes(StandardCharsets.UTF_8))); + Certificate[] certificates = new Certificate[] { x509Certificate }; + + when(sslHandler.engine()).thenReturn(engine); + when(engine.getSession()).thenReturn(session); + when(session.getPeerCertificates()).thenReturn(certificates); + + NettyRpcServer.sslHandshakeCompleteHandler(conn, sslHandler, remoteAddress); + + assertArrayEquals(certificates, conn.clientCertificateChain); + } + } From f253453d92fc20e6ce18a06f5db9e6b795889707 Mon Sep 17 00:00:00 2001 From: Nirdosh Yadav Date: Mon, 5 Feb 2024 18:46:43 +0530 Subject: [PATCH 238/514] HBASE-28334 Add comment around erasure coding policy in DEFAULT_VALUE map (#5662) Signed-off-by: Bryan Beaudreault --- .../org/apache/hadoop/hbase/client/TableDescriptorBuilder.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 7cab90c7e378..fcdbe4e4ae64 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -234,6 +234,7 @@ public class TableDescriptorBuilder { DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); + // Setting ERASURE_CODING_POLICY to NULL so that it is not considered as metadata DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY)); DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s))) .forEach(RESERVED_KEYWORDS::add); From 256f10be78f0e1811b9e2374d0e5960eae9d8d20 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Mon, 5 Feb 2024 08:26:31 -0500 Subject: [PATCH 239/514] HBASE-28338 Bounded leak of FSDataInputStream buffers from checksum switching (#5660) Signed-off-by: Duo Zhang --- .../hbase/io/FSDataInputStreamWrapper.java | 46 ++++--------------- .../io/TestFSDataInputStreamWrapper.java | 28 +++++------ 2 files changed, 23 insertions(+), 51 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index cb9dc84b94be..33eace47d632 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -19,17 +19,13 @@ import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -40,8 +36,6 @@ */ @InterfaceAudience.Private public class 
FSDataInputStreamWrapper implements Closeable { - private static final Logger LOG = LoggerFactory.getLogger(FSDataInputStreamWrapper.class); - private static final boolean isLogTraceEnabled = LOG.isTraceEnabled(); private final HFileSystem hfs; private final Path path; @@ -94,9 +88,6 @@ private static class ReadStatistics { long totalZeroCopyBytesRead; } - private Boolean instanceOfCanUnbuffer = null; - private CanUnbuffer unbuffer = null; - protected Path readerPath; public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException { @@ -314,41 +305,22 @@ public HFileSystem getHfs() { * stream, the current socket will be closed and a new socket will be opened to serve the * requests. */ - @SuppressWarnings({ "rawtypes" }) public void unbuffer() { + // todo: it may make sense to always unbuffer both streams. we'd need to carefully + // research the usages to know if that is safe. for now just do the current. FSDataInputStream stream = this.getStream(this.shouldUseHBaseChecksum()); if (stream != null) { - InputStream wrappedStream = stream.getWrappedStream(); - // CanUnbuffer interface was added as part of HDFS-7694 and the fix is available in Hadoop - // 2.6.4+ and 2.7.1+ versions only so check whether the stream object implements the - // CanUnbuffer interface or not and based on that call the unbuffer api. - final Class streamClass = wrappedStream.getClass(); - if (this.instanceOfCanUnbuffer == null) { - // To ensure we compute whether the stream is instance of CanUnbuffer only once. - this.instanceOfCanUnbuffer = false; - if (wrappedStream instanceof CanUnbuffer) { - this.unbuffer = (CanUnbuffer) wrappedStream; - this.instanceOfCanUnbuffer = true; - } - } - if (this.instanceOfCanUnbuffer) { - try { - this.unbuffer.unbuffer(); - } catch (UnsupportedOperationException e) { - if (isLogTraceEnabled) { - LOG.trace("Failed to invoke 'unbuffer' method in class " + streamClass - + " . 
So there may be the stream does not support unbuffering.", e); - } - } - } else { - if (isLogTraceEnabled) { - LOG.trace("Failed to find 'unbuffer' method in class " + streamClass); - } - } + stream.unbuffer(); } } public Path getReaderPath() { return readerPath; } + + // For tests + void setShouldUseHBaseChecksum() { + useHBaseChecksumConfigured = true; + useHBaseChecksum = true; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java index bb476670f10f..77aa00ef91f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.io; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -31,6 +32,7 @@ import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.io.ByteBufferPool; @@ -48,22 +50,22 @@ public class TestFSDataInputStreamWrapper { @Test public void testUnbuffer() throws Exception { InputStream pc = new ParentClass(); - FSDataInputStreamWrapper fsdisw1 = new FSDataInputStreamWrapper(new FSDataInputStream(pc)); + InputStream noChecksumPc = new ParentClass(); + FSDataInputStreamWrapper fsdisw1 = + new FSDataInputStreamWrapper(new FSDataInputStream(pc), new FSDataInputStream(noChecksumPc)); fsdisw1.unbuffer(); - // parent class should be true + // should have called main stream unbuffer, but not no-checksum assertTrue(((ParentClass) pc).getIsCallUnbuffer()); + assertFalse(((ParentClass) noChecksumPc).getIsCallUnbuffer()); + // switch to checksums and call unbuffer again. 
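Side note, not part of the patch: the simplification above leans on FSDataInputStream itself, which implements CanUnbuffer and StreamCapabilities and delegates unbuffer() to the wrapped stream when that stream supports it, so the reflection-style class checks are no longer needed. A minimal capability-checked sketch with a hypothetical helper name:

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.StreamCapabilities;

public final class UnbufferHelper {
  private UnbufferHelper() {
  }

  /** Release buffers and sockets held by the stream, if the stream supports unbuffering. */
  public static void unbufferIfSupported(FSDataInputStream in) {
    if (in.hasCapability(StreamCapabilities.UNBUFFER)) {
      in.unbuffer();
    }
  }
}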
should unbuffer the nochecksum stream now + fsdisw1.setShouldUseHBaseChecksum(); + fsdisw1.unbuffer(); + assertTrue(((ParentClass) noChecksumPc).getIsCallUnbuffer()); fsdisw1.close(); - - InputStream cc1 = new ChildClass1(); - FSDataInputStreamWrapper fsdisw2 = new FSDataInputStreamWrapper(new FSDataInputStream(cc1)); - fsdisw2.unbuffer(); - // child1 class should be true - assertTrue(((ChildClass1) cc1).getIsCallUnbuffer()); - fsdisw2.close(); } private class ParentClass extends FSInputStream implements ByteBufferReadable, CanSetDropBehind, - CanSetReadahead, HasEnhancedByteBufferAccess, CanUnbuffer { + CanSetReadahead, HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities { public boolean isCallUnbuffer = false; @@ -122,12 +124,10 @@ public long getPos() throws IOException { public boolean seekToNewSource(long paramLong) throws IOException { return false; } - } - private class ChildClass1 extends ParentClass { @Override - public void unbuffer() { - isCallUnbuffer = true; + public boolean hasCapability(String s) { + return s.equals(StreamCapabilities.UNBUFFER); } } } From 16de74c194aa5db028f78d926b82b84c4df8b257 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 6 Feb 2024 10:08:12 +0800 Subject: [PATCH 240/514] HBASE-25051 DIGEST based auth broken for rpc based ConnectionRegistry (#5631) Signed-off-by: Bryan Beaudreault --- .../AbstractRpcBasedConnectionRegistry.java | 61 +++---- .../hadoop/hbase/client/ClusterIdFetcher.java | 134 ++++++++++++++ .../hbase/client/ConnectionFactory.java | 2 +- .../client/ConnectionOverAsyncConnection.java | 2 +- .../client/ConnectionRegistryFactory.java | 5 +- .../ConnectionRegistryRpcStubHolder.java | 168 ++++++++++++++++++ .../hadoop/hbase/client/ConnectionUtils.java | 10 ++ .../hadoop/hbase/client/MasterRegistry.java | 8 +- .../hbase/client/RpcConnectionRegistry.java | 7 +- .../hbase/client/TableOverAsyncTable.java | 2 +- .../hbase/client/ZKConnectionRegistry.java | 4 +- .../hadoop/hbase/ipc/AbstractRpcClient.java | 2 +- .../hbase/ipc/BlockingRpcConnection.java | 103 +++-------- .../org/apache/hadoop/hbase/ipc/Call.java | 5 + .../hadoop/hbase/ipc/NettyRpcConnection.java | 26 ++- .../hbase/ipc/NettyRpcDuplexHandler.java | 88 +-------- .../apache/hadoop/hbase/ipc/RpcClient.java | 2 + .../hadoop/hbase/ipc/RpcConnection.java | 120 ++++++++++++- .../BuiltInSaslAuthenticationProvider.java | 6 + .../DigestSaslAuthenticationProvider.java | 5 +- .../GssSaslAuthenticationProvider.java | 5 +- .../SimpleSaslAuthenticationProvider.java | 6 +- .../client/DoNothingConnectionRegistry.java | 5 +- .../client/TestAsyncAdminRpcPriority.java | 7 +- .../client/TestAsyncConnectionTracing.java | 7 +- .../TestAsyncMetaRegionLocatorFailFast.java | 11 +- .../client/TestAsyncRegionLocatorTracing.java | 6 +- .../client/TestAsyncTableRpcPriority.java | 6 +- .../hbase/client/TestAsyncTableTracing.java | 60 +++---- .../client/TestConnectionRegistryLeak.java | 5 +- .../TestRpcBasedRegistryHedgedReads.java | 14 +- .../hbase/ipc/TestTLSHandshadeFailure.java | 7 +- .../hadoop/hbase/util/ConcurrentMapUtils.java | 8 - .../hbase/util/IOExceptionSupplier.java | 30 ++++ .../src/main/protobuf/server/Registry.proto | 19 +- .../client/ClusterConnectionFactory.java | 2 +- .../hadoop/hbase/ipc/NettyRpcServer.java | 9 +- .../ipc/NettyRpcServerPreambleHandler.java | 18 +- .../hadoop/hbase/ipc/ServerRpcConnection.java | 79 +++++--- .../hbase/ipc/SimpleServerRpcConnection.java | 19 +- .../apache/hadoop/hbase/wal/WALSplitUtil.java | 2 +- .../TestZooKeeperTableArchiveClient.java | 13 +- 
.../client/AbstractTestRegionLocator.java | 3 +- .../hbase/client/DummyConnectionRegistry.java | 57 ------ .../TestAsyncAdminWithRegionReplicas.java | 3 +- .../client/TestAsyncMetaRegionLocator.java | 4 +- .../client/TestAsyncNonMetaRegionLocator.java | 4 +- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 2 +- .../hbase/client/TestAsyncRegionLocator.java | 2 +- ...stAsyncSingleRequestRpcRetryingCaller.java | 2 +- .../client/TestAsyncTableUseMetaReplicas.java | 4 +- .../hbase/client/TestBootstrapNodeUpdate.java | 3 +- ...talogReplicaLoadBalanceSimpleSelector.java | 3 +- .../hbase/client/TestMasterRegistry.java | 54 +++--- .../client/TestMetaRegionLocationCache.java | 4 +- .../client/TestRpcConnectionRegistry.java | 57 +++++- .../client/TestZKConnectionRegistry.java | 6 +- .../hadoop/hbase/ipc/AbstractTestIPC.java | 83 ++++++++- .../hadoop/hbase/ipc/TestBlockingIPC.java | 4 +- .../apache/hadoop/hbase/ipc/TestNettyIPC.java | 5 +- .../hadoop/hbase/ipc/TestNettyTlsIPC.java | 23 ++- .../regionserver/TestWALEntrySinkFilter.java | 13 +- .../token/TestGenerateDelegationToken.java | 80 ++++++--- 63 files changed, 1031 insertions(+), 483 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterIdFetcher.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryRpcStubHolder.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionSupplier.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 4e97dcab24dd..62c6951b4535 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -33,22 +33,17 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; -import org.apache.hadoop.hbase.ipc.RpcClient; -import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; @@ -79,30 +74,21 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry private final int hedgedReadFanOut; - // Configured list of end points to probe the meta information from. - private volatile ImmutableMap addr2Stub; - // RPC client used to talk to the masters. 
- private final RpcClient rpcClient; + private final ConnectionRegistryRpcStubHolder rpcStubHolder; private final RpcControllerFactory rpcControllerFactory; - private final int rpcTimeoutMs; private final RegistryEndpointsRefresher registryEndpointRefresher; - protected AbstractRpcBasedConnectionRegistry(Configuration conf, + protected AbstractRpcBasedConnectionRegistry(Configuration conf, User user, String hedgedReqsFanoutConfigName, String initialRefreshDelaySecsConfigName, String refreshIntervalSecsConfigName, String minRefreshIntervalSecsConfigName) throws IOException { this.hedgedReadFanOut = Math.max(1, conf.getInt(hedgedReqsFanoutConfigName, HEDGED_REQS_FANOUT_DEFAULT)); - rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE, - conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - // XXX: we pass cluster id as null here since we do not have a cluster id yet, we have to fetch - // this through the master registry... - // This is a problem as we will use the cluster id to determine the authentication method - rpcClient = RpcClientFactory.createClient(conf, null); rpcControllerFactory = RpcControllerFactory.instantiate(conf); - populateStubs(getBootstrapNodes(conf)); + rpcStubHolder = new ConnectionRegistryRpcStubHolder(conf, user, rpcControllerFactory, + getBootstrapNodes(conf)); // could return null here is refresh interval is less than zero registryEndpointRefresher = RegistryEndpointsRefresher.create(conf, initialRefreshDelaySecsConfigName, @@ -114,19 +100,7 @@ protected AbstractRpcBasedConnectionRegistry(Configuration conf, protected abstract CompletableFuture> fetchEndpoints(); private void refreshStubs() throws IOException { - populateStubs(FutureUtils.get(fetchEndpoints())); - } - - private void populateStubs(Set addrs) throws IOException { - Preconditions.checkNotNull(addrs); - ImmutableMap.Builder builder = - ImmutableMap.builderWithExpectedSize(addrs.size()); - User user = User.getCurrent(); - for (ServerName masterAddr : addrs) { - builder.put(masterAddr, - ClientMetaService.newStub(rpcClient.createRpcChannel(masterAddr, user, rpcTimeoutMs))); - } - addr2Stub = builder.build(); + rpcStubHolder.refreshStubs(() -> FutureUtils.get(fetchEndpoints())); } /** @@ -211,20 +185,25 @@ private void groupCall(CompletableFuture future, Set CompletableFuture call(Callable callable, Predicate isValidResp, String debug) { - ImmutableMap addr2StubRef = addr2Stub; - Set servers = addr2StubRef.keySet(); - List stubs = new ArrayList<>(addr2StubRef.values()); - Collections.shuffle(stubs, ThreadLocalRandom.current()); CompletableFuture future = new CompletableFuture<>(); - groupCall(future, servers, stubs, 0, callable, isValidResp, debug, - new ConcurrentLinkedQueue<>()); + FutureUtils.addListener(rpcStubHolder.getStubs(), (addr2Stub, error) -> { + if (error != null) { + future.completeExceptionally(error); + return; + } + Set servers = addr2Stub.keySet(); + List stubs = new ArrayList<>(addr2Stub.values()); + Collections.shuffle(stubs, ThreadLocalRandom.current()); + groupCall(future, servers, stubs, 0, callable, isValidResp, debug, + new ConcurrentLinkedQueue<>()); + }); return future; } @RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*") - Set getParsedServers() { - return addr2Stub.keySet(); + Set getParsedServers() throws IOException { + return FutureUtils.get(rpcStubHolder.getStubs()).keySet(); } /** @@ -277,8 +256,8 @@ public void close() { if (registryEndpointRefresher != null) { 
registryEndpointRefresher.stop(); } - if (rpcClient != null) { - rpcClient.close(); + if (rpcStubHolder != null) { + rpcStubHolder.close(); } }, getClass().getSimpleName() + ".close"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterIdFetcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterIdFetcher.java new file mode 100644 index 000000000000..277629681ec6 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterIdFetcher.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ConnectionRegistryService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse; + +/** + * Fetch cluster id through special preamble header. + *
<p/>
+ * An instance of this class should only be used once, like: + * + *
<pre>
+ * new ClusterIdFetcher().fetchClusterId()
+ * </pre>
+ * + * Calling the fetchClusterId multiple times will lead unexpected behavior. + *
<p/>
+ * See HBASE-25051 for more details. + */ +@InterfaceAudience.Private +class ClusterIdFetcher { + + private static final Logger LOG = LoggerFactory.getLogger(ClusterIdFetcher.class); + + private final List bootstrapServers; + + private final User user; + + private final RpcClient rpcClient; + + private final RpcControllerFactory rpcControllerFactory; + + private final CompletableFuture future; + + ClusterIdFetcher(Configuration conf, User user, RpcControllerFactory rpcControllerFactory, + Set bootstrapServers) { + this.user = user; + // use null cluster id here as we do not know the cluster id yet, we will fetch it through this + // rpc client + this.rpcClient = RpcClientFactory.createClient(conf, null); + this.rpcControllerFactory = rpcControllerFactory; + this.bootstrapServers = new ArrayList(bootstrapServers); + // shuffle the bootstrap servers so we will not always fetch from the same one + Collections.shuffle(this.bootstrapServers); + future = new CompletableFuture(); + } + + /** + * Try get cluster id from the server with the given {@code index} in {@link #bootstrapServers}. + */ + private void getClusterId(int index) { + ServerName server = bootstrapServers.get(index); + LOG.debug("Going to request {} for getting cluster id", server); + // user and rpcTimeout are both not important here, as we will not actually send any rpc calls + // out, only a preamble connection header, but if we pass null as user, there will be NPE in + // some code paths... + RpcChannel channel = rpcClient.createRpcChannel(server, user, 0); + ConnectionRegistryService.Interface stub = ConnectionRegistryService.newStub(channel); + HBaseRpcController controller = rpcControllerFactory.newController(); + stub.getConnectionRegistry(controller, GetConnectionRegistryRequest.getDefaultInstance(), + new RpcCallback() { + + @Override + public void run(GetConnectionRegistryResponse resp) { + if (!controller.failed()) { + LOG.debug("Got connection registry info: {}", resp); + future.complete(resp.getClusterId()); + return; + } + if (ConnectionUtils.isUnexpectedPreambleHeaderException(controller.getFailed())) { + // this means we have connected to an old server where it does not support passing + // cluster id through preamble connnection header, so we fallback to use null + // cluster id, which is the old behavior + LOG.debug("Failed to get connection registry info, should be an old server," + + " fallback to use null cluster id", controller.getFailed()); + future.complete(null); + } else { + LOG.debug("Failed to get connection registry info", controller.getFailed()); + if (index == bootstrapServers.size() - 1) { + future.completeExceptionally(controller.getFailed()); + } else { + // try next bootstrap server + getClusterId(index + 1); + } + } + } + }); + + } + + CompletableFuture fetchClusterId() { + getClusterId(0); + // close the rpc client after we finish the request + FutureUtils.addListener(future, (r, e) -> rpcClient.close()); + return future; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index ac70091dcf65..716fb4863fe8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -338,7 +338,7 @@ public static CompletableFuture createAsyncConnection(Configura final User user, Map connectionAttributes) { return TraceUtil.tracedFuture(() -> { 
CompletableFuture future = new CompletableFuture<>(); - ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf); + ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf, user); addListener(registry.getClusterId(), (clusterId, error) -> { if (error != null) { registry.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index 51368fc23c15..30c348e6d1f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.util.ConcurrentMapUtils.IOExceptionSupplier; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.IOExceptionSupplier; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index f198c3c22002..415d46397b8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -33,10 +34,10 @@ private ConnectionRegistryFactory() { } /** Returns The connection registry implementation to use. */ - static ConnectionRegistry getRegistry(Configuration conf) { + static ConnectionRegistry getRegistry(Configuration conf, User user) { Class clazz = conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, ConnectionRegistry.class); - return ReflectionUtils.newInstance(clazz, conf); + return ReflectionUtils.newInstance(clazz, conf, user); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryRpcStubHolder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryRpcStubHolder.java new file mode 100644 index 000000000000..3dbcfbe8e6bf --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryRpcStubHolder.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.IOExceptionSupplier; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService; + +/** + * A class for creating {@link RpcClient} and related stubs used by + * {@link AbstractRpcBasedConnectionRegistry}. We need to connect to bootstrap nodes to get the + * cluster id first, before creating the final {@link RpcClient} and related stubs. + *
<p/>
+ * See HBASE-25051 for more details. + */ +@InterfaceAudience.Private +class ConnectionRegistryRpcStubHolder implements Closeable { + + private static final Logger LOG = LoggerFactory.getLogger(ConnectionRegistryRpcStubHolder.class); + + private final Configuration conf; + + // used for getting cluster id + private final Configuration noAuthConf; + + private final User user; + + private final RpcControllerFactory rpcControllerFactory; + + private final Set bootstrapNodes; + + private final int rpcTimeoutMs; + + private volatile ImmutableMap addr2Stub; + + private volatile RpcClient rpcClient; + + private CompletableFuture> addr2StubFuture; + + ConnectionRegistryRpcStubHolder(Configuration conf, User user, + RpcControllerFactory rpcControllerFactory, Set bootstrapNodes) { + this.conf = conf; + if (User.isHBaseSecurityEnabled(conf)) { + this.noAuthConf = new Configuration(conf); + this.noAuthConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); + } else { + this.noAuthConf = conf; + } + this.user = user; + this.rpcControllerFactory = rpcControllerFactory; + this.bootstrapNodes = Collections.unmodifiableSet(bootstrapNodes); + this.rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE, + conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + } + + private ImmutableMap createStubs(RpcClient rpcClient, + Collection addrs) { + LOG.debug("Going to use new servers to create stubs: {}", addrs); + Preconditions.checkNotNull(addrs); + ImmutableMap.Builder builder = + ImmutableMap.builderWithExpectedSize(addrs.size()); + for (ServerName masterAddr : addrs) { + builder.put(masterAddr, + ClientMetaService.newStub(rpcClient.createRpcChannel(masterAddr, user, rpcTimeoutMs))); + } + return builder.build(); + } + + private CompletableFuture> + fetchClusterIdAndCreateStubs() { + CompletableFuture> future = + new CompletableFuture<>(); + addr2StubFuture = future; + FutureUtils.addListener( + new ClusterIdFetcher(noAuthConf, user, rpcControllerFactory, bootstrapNodes).fetchClusterId(), + (clusterId, error) -> { + synchronized (ConnectionRegistryRpcStubHolder.this) { + if (error != null) { + addr2StubFuture.completeExceptionally(error); + } else { + RpcClient c = RpcClientFactory.createClient(conf, clusterId); + ImmutableMap m = + createStubs(c, bootstrapNodes); + rpcClient = c; + addr2Stub = m; + addr2StubFuture.complete(m); + } + addr2StubFuture = null; + } + }); + // here we must use the local variable future instead of addr2StubFuture, as the above listener + // could be executed directly in the same thread(if the future completes quick enough), since + // the synchronized lock is reentrant, it could set addr2StubFuture to null in the end, so when + // arriving here the addr2StubFuture could be null. 
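To show what the stub holder above enables on the client side, a sketch that is not part of the patch: a client holding only an HBase delegation token can now use the rpc-based registry, because the cluster id is learned through the plain preamble exchange before any SASL mechanism is chosen. The bootstrap host names are placeholders, and the delegation token is assumed to have been obtained elsewhere, for example with TokenUtil as in TestGenerateDelegationToken from the diffstat above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RpcConnectionRegistry;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenAuthWithRpcRegistry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
      RpcConnectionRegistry.class.getName());
    conf.set(RpcConnectionRegistry.BOOTSTRAP_NODES, "master1:16000,master2:16000");
    // The current UGI is assumed to already carry an HBase delegation token (DIGEST).
    User user = User.create(UserGroupInformation.getCurrentUser());
    try (Connection connection = ConnectionFactory.createConnection(conf, user)) {
      // Registry RPCs and normal RPCs can both authenticate with the token now.
      System.out.println(connection.getClusterId());
    }
  }
}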
+ return future; + } + + CompletableFuture> getStubs() { + ImmutableMap s = this.addr2Stub; + if (s != null) { + return CompletableFuture.completedFuture(s); + } + synchronized (this) { + s = this.addr2Stub; + if (s != null) { + return CompletableFuture.completedFuture(s); + } + if (addr2StubFuture != null) { + return addr2StubFuture; + } + return fetchClusterIdAndCreateStubs(); + } + } + + void refreshStubs(IOExceptionSupplier> fetchEndpoints) throws IOException { + // There is no actual call yet so we have not initialize the rpc client and related stubs yet, + // give up refreshing + if (addr2Stub == null) { + LOG.debug("Skip refreshing stubs as we have not initialized rpc client yet"); + return; + } + LOG.debug("Going to refresh stubs"); + assert rpcClient != null; + addr2Stub = createStubs(rpcClient, fetchEndpoints.get()); + } + + @Override + public void close() { + if (rpcClient != null) { + rpcClient.close(); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 4827708a02e3..d073fef929fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.ipc.FatalConnectionException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.util.Bytes; @@ -663,4 +664,13 @@ static void setCoprocessorError(RpcController controller, Throwable error) { controller.setFailed(error.toString()); } } + + static boolean isUnexpectedPreambleHeaderException(IOException e) { + if (!(e instanceof RemoteException)) { + return false; + } + RemoteException re = (RemoteException) e; + return FatalConnectionException.class.getName().equals(re.getClassName()) + && re.getMessage().startsWith("Expected HEADER="); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index b6f81c30f0bd..364180fe1414 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.DNS.ServerType; import org.apache.yetus.audience.InterfaceAudience; @@ -105,9 +106,10 @@ public static Set parseMasterAddrs(Configuration conf) throws Unknow private final String connectionString; - MasterRegistry(Configuration conf) throws IOException { - super(conf, MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS, - MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); + MasterRegistry(Configuration conf, User user) throws IOException { + super(conf, user, MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, + MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS, MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, + MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); connectionString = getConnectionString(conf); } diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java index 2c320d3a9d1d..c3ed560923ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistry.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; @@ -75,9 +76,9 @@ public class RpcConnectionRegistry extends AbstractRpcBasedConnectionRegistry { private final String connectionString; - RpcConnectionRegistry(Configuration conf) throws IOException { - super(conf, HEDGED_REQS_FANOUT_KEY, INITIAL_REFRESH_DELAY_SECS, PERIODIC_REFRESH_INTERVAL_SECS, - MIN_SECS_BETWEEN_REFRESHES); + RpcConnectionRegistry(Configuration conf, User user) throws IOException { + super(conf, user, HEDGED_REQS_FANOUT_KEY, INITIAL_REFRESH_DELAY_SECS, + PERIODIC_REFRESH_INTERVAL_SECS, MIN_SECS_BETWEEN_REFRESHES); connectionString = buildConnectionString(conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java index 0a7dabd476ce..8c61e8b584f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java @@ -55,9 +55,9 @@ import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ConcurrentMapUtils.IOExceptionSupplier; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.IOExceptionSupplier; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 1634b13ec7e8..0e13f0b83c91 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -60,7 +61,8 @@ class ZKConnectionRegistry implements ConnectionRegistry { private final ZNodePaths znodePaths; - ZKConnectionRegistry(Configuration conf) { + // User not used, but for rpc based registry we need it + ZKConnectionRegistry(Configuration conf, User user) { this.znodePaths = new ZNodePaths(conf); this.zk = new ReadOnlyZKClient(conf); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 5926539d0679..7972cc08acd2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -454,7 +454,7 @@ public void run(Call call) { } } - private static Address createAddr(ServerName sn) { + static Address createAddr(ServerName sn) { return Address.fromParts(sn.getHostname(), sn.getPort()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index f30b77c64fe9..0478000a2375 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.ipc.IPCUtil.buildRequestHeader; -import static org.apache.hadoop.hbase.ipc.IPCUtil.getTotalSizeWhenWrittenDelimited; import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled; import static org.apache.hadoop.hbase.ipc.IPCUtil.write; @@ -43,7 +42,6 @@ import java.util.concurrent.ThreadLocalRandom; import javax.security.sasl.SaslException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -64,19 +62,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; -import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; /** * Thread that reads responses and notifies callers. 
Each connection owns a socket connected to a @@ -218,7 +211,7 @@ public void cleanup(IOException e) { BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics, rpcClient.connectionAttributes); + rpcClient.cellBlockBuilder, rpcClient.metrics, rpcClient.connectionAttributes); this.rpcClient = rpcClient; this.connectionHeaderPreamble = getConnectionHeaderPreamble(); ConnectionHeader header = getConnectionHeader(); @@ -435,6 +428,15 @@ public Object run() throws IOException, InterruptedException { }); } + private void getConnectionRegistry(OutputStream outStream) throws IOException { + outStream.write(RpcClient.REGISTRY_PREAMBLE_HEADER); + } + + private void createStreams(InputStream inStream, OutputStream outStream) { + this.in = new DataInputStream(new BufferedInputStream(inStream)); + this.out = new DataOutputStream(new BufferedOutputStream(outStream)); + } + private void setupIOstreams() throws IOException { if (socket != null) { // The connection is already available. Perfect. @@ -462,6 +464,11 @@ private void setupIOstreams() throws IOException { InputStream inStream = NetUtils.getInputStream(socket); // This creates a socket with a write timeout. This timeout cannot be changed. OutputStream outStream = NetUtils.getOutputStream(socket, this.rpcClient.writeTO); + if (connectionRegistryCall != null) { + getConnectionRegistry(outStream); + createStreams(inStream, outStream); + break; + } // Write out the preamble -- MAGIC, version, and auth to use. writeConnectionHeaderPreamble(outStream); if (useSasl) { @@ -494,13 +501,11 @@ public Boolean run() throws IOException { // reconnecting because regionserver may change its sasl config after restart. } } - this.in = new DataInputStream(new BufferedInputStream(inStream)); - this.out = new DataOutputStream(new BufferedOutputStream(outStream)); + createStreams(inStream, outStream); // Now write out the connection header writeConnectionHeader(); // process the response from server for connection header if necessary processResponseForConnectionHeader(); - break; } } catch (Throwable t) { @@ -611,7 +616,9 @@ private void writeRequest(Call call) throws IOException { cellBlockMeta = null; } RequestHeader requestHeader = buildRequestHeader(call, cellBlockMeta); - + if (call.isConnectionRegistryCall()) { + connectionRegistryCall = call; + } setupIOstreams(); // Now we're going to write the call. We take the lock, then check that the connection @@ -646,77 +653,13 @@ private void writeRequest(Call call) throws IOException { * Receive a response. Because only one receiver, so no synchronization on in. */ private void readResponse() { - Call call = null; - boolean expectedCall = false; try { - // See HBaseServer.Call.setResponse for where we write out the response. - // Total size of the response. Unused. But have to read it in anyways. 
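For orientation, the setupIOstreams change above reduces to this: when the pending call is a connection-registry call, the blocking client writes the six-byte registry magic instead of the normal 'HBas' preamble and skips SASL and the connection header entirely. A rough stand-alone sketch of just that branch (the writeRegistryPreamble helper is an assumed name for illustration, not a method in this patch):

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    // Stand-alone sketch of the blocking client's "registry call" branch: write the special
    // 6-byte magic (RpcClient.REGISTRY_PREAMBLE_HEADER in this patch) and set up the streams,
    // without the usual preamble / SASL / connection-header handshake.
    final class RegistryPreambleSketch {
      private static final byte[] REGISTRY_PREAMBLE_HEADER = { 'R', 'e', 'g', 'i', 's', 't' };

      static DataOutputStream writeRegistryPreamble(OutputStream rawOut) throws IOException {
        rawOut.write(REGISTRY_PREAMBLE_HEADER);
        rawOut.flush();
        // after this the client simply waits for the response carrying the fake registry call
        return new DataOutputStream(new BufferedOutputStream(rawOut));
      }
    }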
- int totalSize = in.readInt(); - - // Read the header - ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); - int id = responseHeader.getCallId(); - if (LOG.isTraceEnabled()) { - LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) - + ", totalSize: " + totalSize + " bytes"); - } - RemoteException remoteExc; - if (responseHeader.hasException()) { - ExceptionResponse exceptionResponse = responseHeader.getException(); - remoteExc = IPCUtil.createRemoteException(exceptionResponse); - if (IPCUtil.isFatalConnectionException(exceptionResponse)) { - // Here we will cleanup all calls so do not need to fall back, just return. - synchronized (this) { - closeConn(remoteExc); - } - return; - } - } else { - remoteExc = null; - } - - call = calls.remove(id); // call.done have to be set before leaving this method - expectedCall = (call != null && !call.isDone()); - if (!expectedCall) { - // So we got a response for which we have no corresponding 'call' here on the client-side. - // We probably timed out waiting, cleaned up all references, and now the server decides - // to return a response. There is nothing we can do w/ the response at this stage. Clean - // out the wire of the response so its out of the way and we can get other responses on - // this connection. - int readSoFar = getTotalSizeWhenWrittenDelimited(responseHeader); - int whatIsLeftToRead = totalSize - readSoFar; - LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead - + " bytes"); - IOUtils.skipFully(in, whatIsLeftToRead); - if (call != null) { - call.callStats.setResponseSizeBytes(totalSize); + readResponse(in, calls, remoteExc -> { + synchronized (this) { + closeConn(remoteExc); } - return; - } - call.callStats.setResponseSizeBytes(totalSize); - if (remoteExc != null) { - call.setException(remoteExc); - return; - } - Message value = null; - if (call.responseDefaultType != null) { - Message.Builder builder = call.responseDefaultType.newBuilderForType(); - ProtobufUtil.mergeDelimitedFrom(builder, in); - value = builder.build(); - } - CellScanner cellBlockScanner = null; - if (responseHeader.hasCellBlockMeta()) { - int size = responseHeader.getCellBlockMeta().getLength(); - byte[] cellBlock = new byte[size]; - IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); - cellBlockScanner = - this.rpcClient.cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); - } - call.setResponse(value, cellBlockScanner); + }); } catch (IOException e) { - if (expectedCall) { - call.setException(e); - } if (e instanceof SocketTimeoutException) { // Clean up open calls but don't treat this as a fatal condition, // since we expect certain responses to not make it by the specified diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index 669fc73a3bfa..d175ea0b6e90 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -34,6 +34,7 @@ import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ConnectionRegistryService; /** A call waiting for a value. 
*/ @InterfaceAudience.Private @@ -156,4 +157,8 @@ public synchronized boolean isDone() { public long getStartTime() { return this.callStats.getStartTime(); } + + public boolean isConnectionRegistryCall() { + return md.getService().equals(ConnectionRegistryService.getDescriptor()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 408ea347e7a3..a0f8f10d1cf9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -104,7 +104,7 @@ class NettyRpcConnection extends RpcConnection { NettyRpcConnection(NettyRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, - rpcClient.metrics, rpcClient.connectionAttributes); + rpcClient.cellBlockBuilder, rpcClient.metrics, rpcClient.connectionAttributes); this.rpcClient = rpcClient; this.eventLoop = rpcClient.group.next(); byte[] connectionHeaderPreamble = getConnectionHeaderPreamble(); @@ -274,6 +274,12 @@ public void operationComplete(Future future) throws Exception { }); } + private void getConnectionRegistry(Channel ch) throws IOException { + established(ch); + NettyFutureUtils.safeWriteAndFlush(ch, + Unpooled.directBuffer(6).writeBytes(RpcClient.REGISTRY_PREAMBLE_HEADER)); + } + private void connect() throws UnknownHostException { assert eventLoop.inEventLoop(); LOG.trace("Connecting to {}", remoteId.getAddress()); @@ -303,12 +309,16 @@ protected void initChannel(Channel ch) throws Exception { .addListener(new ChannelFutureListener() { private void succeed(Channel ch) throws IOException { - ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate()); + if (connectionRegistryCall != null) { + getConnectionRegistry(ch); + return; + } + NettyFutureUtils.safeWriteAndFlush(ch, connectionHeaderPreamble.retainedDuplicate()); if (useSasl) { saslNegotiate(ch); } else { // send the connection header to server - ch.write(connectionHeaderWithLength.retainedDuplicate()); + NettyFutureUtils.safeWrite(ch, connectionHeaderWithLength.retainedDuplicate()); established(ch); } } @@ -317,6 +327,9 @@ private void fail(Channel ch, Throwable error) { IOException ex = toIOE(error); LOG.warn("Exception encountered while connecting to the server " + remoteId.getAddress(), ex); + if (connectionRegistryCall != null) { + connectionRegistryCall.setException(ex); + } failInit(ch, ex); rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), error); } @@ -346,6 +359,13 @@ public void operationComplete(ChannelFuture future) throws Exception { private void sendRequest0(Call call, HBaseRpcController hrc) throws IOException { assert eventLoop.inEventLoop(); + if (call.isConnectionRegistryCall()) { + connectionRegistryCall = call; + // For get connection registry call, we will send a special preamble header to get the + // response, instead of sending a real rpc call. 
See HBASE-25051 + connect(); + return; + } if (reloginInProgress) { throw new IOException(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index ad8c51568a32..44772ae2dbf9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -18,21 +18,16 @@ package org.apache.hadoop.hbase.ipc; import io.opentelemetry.context.Scope; -import java.io.EOFException; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; @@ -44,9 +39,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; /** * The netty rpc handler. @@ -127,88 +120,15 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) } } - private void finishCall(ResponseHeader responseHeader, ByteBufInputStream in, Call call) - throws IOException { - Message value; - if (call.responseDefaultType != null) { - Message.Builder builder = call.responseDefaultType.newBuilderForType(); - if (!builder.mergeDelimitedFrom(in)) { - // The javadoc of mergeDelimitedFrom says returning false means the stream reaches EOF - // before reading any bytes out, so here we need to manually finish create the EOFException - // and finish the call - call.setException(new EOFException("EOF while reading response with type: " - + call.responseDefaultType.getClass().getName())); - return; - } - value = builder.build(); - } else { - value = null; - } - CellScanner cellBlockScanner; - if (responseHeader.hasCellBlockMeta()) { - int size = responseHeader.getCellBlockMeta().getLength(); - // Maybe we could read directly from the ByteBuf. - // The problem here is that we do not know when to release it. 
- byte[] cellBlock = new byte[size]; - in.readFully(cellBlock); - cellBlockScanner = cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); - } else { - cellBlockScanner = null; - } - call.setResponse(value, cellBlockScanner); - } - private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOException { - int totalSize = buf.readInt(); - ByteBufInputStream in = new ByteBufInputStream(buf); - ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); - int id = responseHeader.getCallId(); - if (LOG.isTraceEnabled()) { - LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) - + ", totalSize: " + totalSize + " bytes"); - } - RemoteException remoteExc; - if (responseHeader.hasException()) { - ExceptionResponse exceptionResponse = responseHeader.getException(); - remoteExc = IPCUtil.createRemoteException(exceptionResponse); - if (IPCUtil.isFatalConnectionException(exceptionResponse)) { - // Here we will cleanup all calls so do not need to fall back, just return. - exceptionCaught(ctx, remoteExc); - return; - } - } else { - remoteExc = null; - } - Call call = id2Call.remove(id); - if (call == null) { - // So we got a response for which we have no corresponding 'call' here on the client-side. - // We probably timed out waiting, cleaned up all references, and now the server decides - // to return a response. There is nothing we can do w/ the response at this stage. Clean - // out the wire of the response so its out of the way and we can get other responses on - // this connection. - if (LOG.isDebugEnabled()) { - int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); - int whatIsLeftToRead = totalSize - readSoFar; - LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead - + " bytes"); - } - return; - } - call.callStats.setResponseSizeBytes(totalSize); - if (remoteExc != null) { - call.setException(remoteExc); - return; - } try { - finishCall(responseHeader, in, call); + conn.readResponse(new ByteBufInputStream(buf), id2Call, + remoteExc -> exceptionCaught(ctx, remoteExc)); } catch (IOException e) { - // As the call has been removed from id2Call map, if we hit an exception here, the - // exceptionCaught method can not help us finish the call, so here we need to catch the - // exception and finish it - // And in netty, the decoding the frame based, when reaching here we have already read a full + // In netty, the decoding the frame based, when reaching here we have already read a full // frame, so hitting exception here does not mean the stream decoding is broken, thus we do // not need to throw the exception out and close the connection. - call.setException(e); + LOG.warn("failed to process response", e); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 045216e88811..369430e337ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -54,6 +54,8 @@ public interface RpcClient extends Closeable { // The client in 0.99+ does not ping the server. int PING_CALL_ID = -1; + byte[] REGISTRY_PREAMBLE_HEADER = new byte[] { 'R', 'e', 'g', 'i', 's', 't' }; + /** * Creates a "channel" that can be used by a blocking protobuf service. Useful setting up protobuf * blocking stubs. 
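Both the regular preamble ('HBas' + version + auth code) and the new registry preamble are exactly six bytes, which is what lets the server keep its fixed-length preamble decoder and simply branch on the bytes it reads. A hypothetical helper showing that comparison (not code from this patch; the real dispatch lives in ServerRpcConnection and NettyRpcServerPreambleHandler further below):

    import java.util.Arrays;

    // Hypothetical illustration: both the normal preamble ('HBas' + version + auth code) and
    // the registry preamble ("Regist") are six bytes, so a fixed-length decoder can read
    // either and the server can branch on the content.
    final class PreambleKind {
      private static final byte[] REGISTRY_MAGIC = { 'R', 'e', 'g', 'i', 's', 't' };

      static boolean isRegistryPreamble(byte[] sixBytes) {
        return Arrays.equals(sixBytes, REGISTRY_MAGIC);
      }
    }

Keeping both preambles at six bytes means the same FixedLengthFrameDecoder(6) can be reused for either case on the server side.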
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index dbe6ed1648df..65f936d6fc38 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -17,12 +17,17 @@ */ package org.apache.hadoop.hbase.ipc; +import java.io.DataInput; +import java.io.EOFException; import java.io.IOException; +import java.io.InputStream; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; @@ -34,12 +39,15 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; @@ -48,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; /** @@ -72,6 +82,8 @@ abstract class RpcConnection { protected final CompressionCodec compressor; + protected final CellBlockBuilder cellBlockBuilder; + protected final MetricsConnection metrics; private final Map connectionAttributes; @@ -90,10 +102,12 @@ abstract class RpcConnection { protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, ConnectionId remoteId, String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, - MetricsConnection metrics, Map connectionAttributes) throws IOException { + CellBlockBuilder cellBlockBuilder, MetricsConnection metrics, + Map connectionAttributes) throws IOException { this.timeoutTimer = timeoutTimer; this.codec = codec; this.compressor = compressor; + this.cellBlockBuilder = cellBlockBuilder; this.conf = conf; this.metrics = metrics; this.connectionAttributes = connectionAttributes; @@ -150,14 +164,13 @@ protected byte[] getConnectionHeaderPreamble() { // Assemble the preamble up in a buffer first and then send it. Writing individual elements, // they are getting sent across piecemeal according to wireshark and then server is messing // up the reading on occasion (the passed in stream is not buffered yet). 
- - // Preamble is six bytes -- 'HBas' + VERSION + AUTH_CODE int rpcHeaderLen = HConstants.RPC_HEADER.length; + // Preamble is six bytes -- 'HBas' + VERSION + AUTH_CODE byte[] preamble = new byte[rpcHeaderLen + 2]; System.arraycopy(HConstants.RPC_HEADER, 0, preamble, 0, rpcHeaderLen); preamble[rpcHeaderLen] = HConstants.RPC_CURRENT_VERSION; synchronized (this) { - preamble[rpcHeaderLen + 1] = provider.getSaslAuthMethod().getCode(); + preamble[preamble.length - 1] = provider.getSaslAuthMethod().getCode(); } return preamble; } @@ -238,4 +251,103 @@ public void setLastTouched(long lastTouched) { * Does the clean up work after the connection is removed from the connection pool */ public abstract void cleanupConnection(); + + protected Call connectionRegistryCall; + + private void finishCall(ResponseHeader responseHeader, T in, + Call call) throws IOException { + Message value; + if (call.responseDefaultType != null) { + Message.Builder builder = call.responseDefaultType.newBuilderForType(); + if (!builder.mergeDelimitedFrom(in)) { + // The javadoc of mergeDelimitedFrom says returning false means the stream reaches EOF + // before reading any bytes out, so here we need to manually finish create the EOFException + // and finish the call + call.setException(new EOFException("EOF while reading response with type: " + + call.responseDefaultType.getClass().getName())); + return; + } + value = builder.build(); + } else { + value = null; + } + CellScanner cellBlockScanner; + if (responseHeader.hasCellBlockMeta()) { + int size = responseHeader.getCellBlockMeta().getLength(); + // Maybe we could read directly from the ByteBuf. + // The problem here is that we do not know when to release it. + byte[] cellBlock = new byte[size]; + in.readFully(cellBlock); + cellBlockScanner = cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); + } else { + cellBlockScanner = null; + } + call.setResponse(value, cellBlockScanner); + } + + void readResponse(T in, Map id2Call, + Consumer fatalConnectionErrorConsumer) throws IOException { + int totalSize = in.readInt(); + ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); + int id = responseHeader.getCallId(); + if (LOG.isTraceEnabled()) { + LOG.trace("got response header " + TextFormat.shortDebugString(responseHeader) + + ", totalSize: " + totalSize + " bytes"); + } + RemoteException remoteExc; + if (responseHeader.hasException()) { + ExceptionResponse exceptionResponse = responseHeader.getException(); + remoteExc = IPCUtil.createRemoteException(exceptionResponse); + if (IPCUtil.isFatalConnectionException(exceptionResponse)) { + // Here we will cleanup all calls so do not need to fall back, just return. + fatalConnectionErrorConsumer.accept(remoteExc); + if (connectionRegistryCall != null) { + connectionRegistryCall.setException(remoteExc); + connectionRegistryCall = null; + } + return; + } + } else { + remoteExc = null; + } + if (id < 0) { + if (connectionRegistryCall != null) { + LOG.debug("process connection registry call"); + finishCall(responseHeader, in, connectionRegistryCall); + connectionRegistryCall = null; + return; + } + } + Call call = id2Call.remove(id); + if (call == null) { + // So we got a response for which we have no corresponding 'call' here on the client-side. + // We probably timed out waiting, cleaned up all references, and now the server decides + // to return a response. There is nothing we can do w/ the response at this stage. 
Clean + // out the wire of the response so its out of the way and we can get other responses on + // this connection. + if (LOG.isDebugEnabled()) { + int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); + int whatIsLeftToRead = totalSize - readSoFar; + LOG.debug("Unknown callId: " + id + ", skipping over this response of " + whatIsLeftToRead + + " bytes"); + } + return; + } + call.callStats.setResponseSizeBytes(totalSize); + if (remoteExc != null) { + call.setException(remoteExc); + return; + } + try { + finishCall(responseHeader, in, call); + } catch (IOException e) { + // As the call has been removed from id2Call map, if we hit an exception here, the + // exceptionCaught method can not help us finish the call, so here we need to catch the + // exception and finish it + call.setException(e); + // throw the exception out, the upper layer should determine whether this is a critical + // problem + throw e; + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java index 712d4035448b..b573a5ee771e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.security.provider; +import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.yetus.audience.InterfaceAudience; /** @@ -35,4 +36,9 @@ public abstract class BuiltInSaslAuthenticationProvider implements SaslAuthentic public String getTokenKind() { return AUTH_TOKEN_TYPE; } + + protected static SaslAuthMethod createSaslAuthMethod(AuthMethod authMethod) { + return new SaslAuthMethod(authMethod.name(), authMethod.code, authMethod.mechanismName, + authMethod.authenticationMethod); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java index d71c07d1575a..f22a06474aef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.security.provider; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.yetus.audience.InterfaceAudience; /** @@ -26,8 +26,7 @@ @InterfaceAudience.Private public class DigestSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = - new SaslAuthMethod("DIGEST", (byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + public static final SaslAuthMethod SASL_AUTH_METHOD = createSaslAuthMethod(AuthMethod.DIGEST); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java index 7dea40f2657a..df6fce859b7f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java 
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.security.provider; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.yetus.audience.InterfaceAudience; /** @@ -26,8 +26,7 @@ @InterfaceAudience.Private public class GssSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = - new SaslAuthMethod("KERBEROS", (byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS); + public static final SaslAuthMethod SASL_AUTH_METHOD = createSaslAuthMethod(AuthMethod.KERBEROS); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java index 01b1f452685a..9d79b648c6e4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.security.provider; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.yetus.audience.InterfaceAudience; /** @@ -25,8 +25,8 @@ */ @InterfaceAudience.Private public class SimpleSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = - new SaslAuthMethod("SIMPLE", (byte) 80, "", AuthenticationMethod.SIMPLE); + + public static final SaslAuthMethod SASL_AUTH_METHOD = createSaslAuthMethod(AuthMethod.SIMPLE); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java index 3b792a5bd15f..30d69d4b3f9e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java @@ -21,15 +21,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; /** * Registry that does nothing. Otherwise, default Registry wants zookeeper up and running. 
*/ @InterfaceAudience.Private -class DoNothingConnectionRegistry implements ConnectionRegistry { +public class DoNothingConnectionRegistry implements ConnectionRegistry { - public DoNothingConnectionRegistry(Configuration conf) { + public DoNothingConnectionRegistry(Configuration conf, User user) { } @Override diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index 4bf425ed562e..f65c7ccb6e75 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -141,9 +142,9 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(adminStub).stopServer(any(HBaseRpcController.class), any(StopServerRequest.class), any()); - - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - UserProvider.instantiate(CONF).getCurrent()) { + User user = UserProvider.instantiate(CONF).getCurrent(); + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, + user) { @Override CompletableFuture getMasterStub() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index ff4a92ae394d..e56fffbb2642 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -63,15 +64,15 @@ public class TestAsyncConnectionTracing { @Before public void setUp() throws IOException { - ConnectionRegistry registry = new DoNothingConnectionRegistry(CONF) { + User user = UserProvider.instantiate(CONF).getCurrent(); + ConnectionRegistry registry = new DoNothingConnectionRegistry(CONF, user) { @Override public CompletableFuture getActiveMaster() { return CompletableFuture.completedFuture(masterServer); } }; - conn = new AsyncConnectionImpl(CONF, registry, "test", null, - UserProvider.instantiate(CONF).getCurrent()); + conn = new AsyncConnectionImpl(CONF, registry, "test", null, user); } @After diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java index b306500c8b13..6380f1f6fb0f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java 
@@ -24,6 +24,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.FutureUtils; @@ -45,8 +47,8 @@ public class TestAsyncMetaRegionLocatorFailFast { private static final class FaultyConnectionRegistry extends DoNothingConnectionRegistry { - public FaultyConnectionRegistry(Configuration conf) { - super(conf); + public FaultyConnectionRegistry(Configuration conf, User user) { + super(conf, user); } @Override @@ -56,8 +58,9 @@ public CompletableFuture getMetaRegionLocations() { } @BeforeClass - public static void setUp() { - LOCATOR = new AsyncMetaRegionLocator(new FaultyConnectionRegistry(CONF)); + public static void setUp() throws IOException { + LOCATOR = new AsyncMetaRegionLocator( + new FaultyConnectionRegistry(CONF, UserProvider.instantiate(CONF).getCurrent())); } @Test(expected = DoNotRetryIOException.class) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 335894303c08..a7df92999d08 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -92,13 +93,14 @@ public void setUp() throws IOException { ServerName.valueOf("127.0.0.2", 12345, EnvironmentEdgeManager.currentTime())), new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2), ServerName.valueOf("127.0.0.3", 12345, EnvironmentEdgeManager.currentTime()))); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) { + User user = UserProvider.instantiate(CONF).getCurrent(); + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user) { @Override public CompletableFuture getMetaRegionLocations() { return CompletableFuture.completedFuture(locs); } - }, "test", null, UserProvider.instantiate(CONF).getCurrent()); + }, "test", null, user); } @After diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index e57967ae7211..cb5431c35d3e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -160,8 
+161,9 @@ public Void answer(InvocationOnMock invocation) throws Throwable { return null; } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - UserProvider.instantiate(CONF).getCurrent()) { + User user = UserProvider.instantiate(CONF).getCurrent(); + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, + user) { @Override AsyncRegionLocator getLocator() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index f9b86221af1e..2cecc974b6ef 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -209,37 +209,37 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); final User user = UserProvider.instantiate(CONF).getCurrent(); - conn = - new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, user) { - - @Override - AsyncRegionLocator getLocator() { - AsyncRegionLocator locator = mock(AsyncRegionLocator.class); - Answer> answer = - new Answer>() { - - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - any(RegionLocateType.class), anyLong()); - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - anyInt(), any(RegionLocateType.class), anyLong()); - return locator; - } + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, + user) { - @Override - ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { - return stub; - } - }; + @Override + AsyncRegionLocator getLocator() { + AsyncRegionLocator locator = mock(AsyncRegionLocator.class); + Answer> answer = + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), + any(RegionLocateType.class), anyLong()); + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), + anyInt(), any(RegionLocateType.class), anyLong()); + return locator; + } + + @Override + ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { + return stub; + } + }; table = conn.getTable(TableName.valueOf("table"), ForkJoinPool.commonPool()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java index a2df7e932395..bd2ca9867f34 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.FutureUtils; @@ -49,8 +50,8 @@ public static final class ConnectionRegistryForTest extends DoNothingConnectionR private boolean closed = false; - public ConnectionRegistryForTest(Configuration conf) { - super(conf); + public ConnectionRegistryForTest(Configuration conf, User user) { + super(conf, user); CREATED.add(this); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index 54b351f00a3b..08c56fe95868 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -58,7 +58,9 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ConnectionRegistryService; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse; @Category({ ClientTests.class, SmallTests.class }) public class TestRpcBasedRegistryHedgedReads { @@ -132,6 +134,12 @@ public static final class RpcChannelImpl implements RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, Message responsePrototype, RpcCallback done) { + if (method.getService().equals(ConnectionRegistryService.getDescriptor())) { + // this is for setting up the rpc client + done.run( + GetConnectionRegistryResponse.newBuilder().setClusterId(RESP.getClusterId()).build()); + return; + } if (!method.getName().equals("GetClusterId")) { // On RPC failures, MasterRegistry internally runs getMasters() RPC to keep the master list // fresh. We do not want to intercept those RPCs here and double count. 
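The interception in the hunk above reads more easily as a stand-alone stub: an RpcChannel that answers the fake ConnectionRegistryService call immediately with a fixed cluster id, which is all the rpc client needs to bootstrap in a test. A simplified restatement (the StubRegistryChannel name is illustrative, not part of this patch):

    import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
    import org.apache.hbase.thirdparty.com.google.protobuf.Message;
    import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
    import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel;
    import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;

    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ConnectionRegistryService;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse;

    // Simplified stand-alone version of the interception shown above: a stubbed RpcChannel
    // answers the fake ConnectionRegistryService call right away so the rpc client can be
    // "set up" with a fixed cluster id in tests.
    final class StubRegistryChannel implements RpcChannel {
      private final String clusterId;

      StubRegistryChannel(String clusterId) {
        this.clusterId = clusterId;
      }

      @Override
      public void callMethod(MethodDescriptor method, RpcController controller, Message request,
        Message responsePrototype, RpcCallback<Message> done) {
        if (method.getService().equals(ConnectionRegistryService.getDescriptor())) {
          done.run(GetConnectionRegistryResponse.newBuilder().setClusterId(clusterId).build());
          return;
        }
        // other methods would be dispatched to a real implementation in an actual test
      }
    }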
@@ -155,9 +163,9 @@ public void callMethod(MethodDescriptor method, RpcController controller, Messag private AbstractRpcBasedConnectionRegistry createRegistry(int hedged) throws IOException { Configuration conf = UTIL.getConfiguration(); conf.setInt(HEDGED_REQS_FANOUT_CONFIG_NAME, hedged); - return new AbstractRpcBasedConnectionRegistry(conf, HEDGED_REQS_FANOUT_CONFIG_NAME, - INITIAL_DELAY_SECS_CONFIG_NAME, REFRESH_INTERVAL_SECS_CONFIG_NAME, - MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { + return new AbstractRpcBasedConnectionRegistry(conf, User.getCurrent(), + HEDGED_REQS_FANOUT_CONFIG_NAME, INITIAL_DELAY_SECS_CONFIG_NAME, + REFRESH_INTERVAL_SECS_CONFIG_NAME, MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { @Override protected Set getBootstrapNodes(Configuration conf) throws IOException { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java index 10948358ff92..8aadce85651d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestTLSHandshadeFailure.java @@ -56,6 +56,8 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.io.netty.handler.ssl.NotSslRecordException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService; + /** * A simple UT to make sure that we do not leak the SslExceptions to netty's TailContext, where it * will generate a confusing WARN message. @@ -149,11 +151,12 @@ public Void answer(InvocationOnMock invocation) throws Throwable { Address.fromParts("127.0.0.1", server.getLocalPort())); NettyRpcConnection conn = client.createConnection(id); BlockingRpcCallback done = new BlockingRpcCallback<>(); - Call call = - new Call(1, null, null, null, null, 0, 0, Collections.emptyMap(), done, new CallStats()); + Call call = new Call(1, ClientMetaService.getDescriptor().getMethods().get(0), null, null, null, + 0, 0, Collections.emptyMap(), done, new CallStats()); HBaseRpcController hrc = new HBaseRpcControllerImpl(); conn.sendRequest(call, hrc); done.get(); + call.error.printStackTrace(); assertThat(call.error, instanceOf(NotSslRecordException.class)); Waiter.waitFor(conf, 5000, () -> msg.get() != null); verify(mockAppender).append(any()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentMapUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentMapUtils.java index cf8130e624e2..9c5ebe8519f4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentMapUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentMapUtils.java @@ -38,14 +38,6 @@ public static V computeIfAbsent(ConcurrentMap map, K key, Supplier< }); } - /** - * A supplier that throws IOException when get. - */ - @FunctionalInterface - public interface IOExceptionSupplier { - V get() throws IOException; - } - /** * In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the * value already exists. 
So here we copy the implementation of diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionSupplier.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionSupplier.java new file mode 100644 index 000000000000..11771a47c083 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IOExceptionSupplier.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A supplier that throws IOException when get. + */ +@InterfaceAudience.Private +@FunctionalInterface +public interface IOExceptionSupplier { + V get() throws IOException; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto index 8dd0d1abdf3d..f55b892413b2 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto @@ -105,4 +105,21 @@ service ClientMetaService { * Get nodes which could be used as ClientMetaService */ rpc GetBootstrapNodes(GetBootstrapNodesRequest) returns (GetBootstrapNodesResponse); -} \ No newline at end of file +} + +message GetConnectionRegistryRequest { +} + +/** + * For returning connection registry information to client, like cluster id + */ +message GetConnectionRegistryResponse { + required string cluster_id = 1; +} + +/** + * Just a fake rpc service for getting connection registry information + */ +service ConnectionRegistryService { + rpc GetConnectionRegistry(GetConnectionRegistryRequest) returns(GetConnectionRegistryResponse); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index 579da46af1c1..7225f92b7ff9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -64,7 +64,7 @@ private static AsyncClusterConnection createAsyncClusterConnection(Configuration */ public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf, SocketAddress localAddress, User user) throws IOException { - return createAsyncClusterConnection(conf, ConnectionRegistryFactory.getRegistry(conf), + return createAsyncClusterConnection(conf, ConnectionRegistryFactory.getRegistry(conf, user), localAddress, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index 629b3468cbe5..1d93fbd0f668 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -63,7 +63,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.WriteBufferWaterMark; import org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup; import org.apache.hbase.thirdparty.io.netty.channel.group.DefaultChannelGroup; -import org.apache.hbase.thirdparty.io.netty.handler.codec.FixedLengthFrameDecoder; import org.apache.hbase.thirdparty.io.netty.handler.ssl.OptionalSslHandler; import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslContext; import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslHandler; @@ -167,13 +166,15 @@ protected void initChannel(Channel ch) throws Exception { ch.config().setWriteBufferWaterMark(writeBufferWaterMark); ch.config().setAllocator(channelAllocator); ChannelPipeline pipeline = ch.pipeline(); - FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); - preambleDecoder.setSingleDecode(true); + NettyServerRpcConnection conn = createNettyServerRpcConnection(ch); + if (conf.getBoolean(HBASE_SERVER_NETTY_TLS_ENABLED, false)) { initSSL(pipeline, conn, conf.getBoolean(HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, true)); } - pipeline.addLast(NettyRpcServerPreambleHandler.DECODER_NAME, preambleDecoder) + pipeline + .addLast(NettyRpcServerPreambleHandler.DECODER_NAME, + NettyRpcServerPreambleHandler.createDecoder()) .addLast(new NettyRpcServerPreambleHandler(NettyRpcServer.this, conn)) // We need NettyRpcServerResponseEncoder here because NettyRpcServerPreambleHandler may // send RpcResponse to client. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index 02e1b5858117..5aa77e0e8ace 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.nio.ByteBuffer; +import org.apache.hadoop.hbase.ipc.ServerRpcConnection.PreambleResponse; import org.apache.hadoop.hbase.util.NettyFutureUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -25,6 +26,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.handler.codec.FixedLengthFrameDecoder; import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; /** @@ -45,6 +47,12 @@ public NettyRpcServerPreambleHandler(NettyRpcServer rpcServer, NettyServerRpcCon this.conn = conn; } + static FixedLengthFrameDecoder createDecoder() { + FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6); + preambleDecoder.setSingleDecode(true); + return preambleDecoder; + } + @Override protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { if (processPreambleError) { @@ -57,11 +65,19 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep ByteBuffer buf = ByteBuffer.allocate(msg.readableBytes()); msg.readBytes(buf); buf.flip(); - if (!conn.processPreamble(buf)) { + PreambleResponse resp = conn.processPreamble(buf); + if (resp == PreambleResponse.CLOSE) { 
processPreambleError = true; conn.close(); return; } + if (resp == PreambleResponse.CONTINUE) { + // we use a single decode decoder, so here we need to replace it with a new one so it will + // decode a new preamble header again + ctx.pipeline().replace(DECODER_NAME, DECODER_NAME, createDecoder()); + return; + } + // resp == PreambleResponse.SUCCEED ChannelPipeline p = ctx.pipeline(); if (conn.useSasl) { LengthFieldBasedFrameDecoder decoder = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 4c32b2b6a5fa..be97ad582c37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -41,6 +41,7 @@ import org.apache.commons.crypto.random.CryptoRandomFactory; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.client.ConnectionRegistryEndpoint; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; @@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.security.provider.SaslServerAuthenticationProviders; import org.apache.hadoop.hbase.security.provider.SimpleSaslServerAuthenticationProvider; import org.apache.hadoop.hbase.trace.TraceUtil; +import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.IntWritable; @@ -87,6 +89,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo; /** Reads calls from a connection and queues them for handling. 
*/ @@ -688,10 +691,32 @@ private void doBadPreambleHandling(String msg) throws IOException { } private void doBadPreambleHandling(String msg, Exception e) throws IOException { - RpcServer.LOG.warn(msg); + RpcServer.LOG.warn(msg, e); doRespond(getErrorResponse(msg, e)); } + private boolean doConnectionRegistryResponse() throws IOException { + if (!(rpcServer.server instanceof ConnectionRegistryEndpoint)) { + // should be in tests or some scenarios where we should not reach here + return false; + } + // on backup masters, this request may be blocked since we need to fetch it from filesystem, + // but since it is just backup master, it is not a critical problem + String clusterId = ((ConnectionRegistryEndpoint) rpcServer.server).getClusterId(); + RpcServer.LOG.debug("Response connection registry, clusterId = '{}'", clusterId); + if (clusterId == null) { + // should be in tests or some scenarios where we should not reach here + return false; + } + GetConnectionRegistryResponse resp = + GetConnectionRegistryResponse.newBuilder().setClusterId(clusterId).build(); + ResponseHeader header = ResponseHeader.newBuilder().setCallId(-1).build(); + ByteBuffer buf = ServerCall.createHeaderAndMessageBytes(resp, header, 0, null); + BufferChain bufChain = new BufferChain(buf); + doRespond(() -> bufChain); + return true; + } + protected final void callCleanupIfNeeded() { if (callCleanup != null) { callCleanup.run(); @@ -699,30 +724,42 @@ protected final void callCleanupIfNeeded() { } } - protected final boolean processPreamble(ByteBuffer preambleBuffer) throws IOException { - assert preambleBuffer.remaining() == 6; - for (int i = 0; i < RPC_HEADER.length; i++) { - if (RPC_HEADER[i] != preambleBuffer.get()) { - doBadPreambleHandling( - "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" - + Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " - + toString()); - return false; - } - } - int version = preambleBuffer.get() & 0xFF; - byte authbyte = preambleBuffer.get(); + protected enum PreambleResponse { + SUCCEED, // successfully processed the rpc preamble header + CONTINUE, // the preamble header is for other purpose, wait for the rpc preamble header + CLOSE // close the rpc connection + } + protected final PreambleResponse processPreamble(ByteBuffer preambleBuffer) throws IOException { + assert preambleBuffer.remaining() == 6; + if ( + ByteBufferUtils.equals(preambleBuffer, preambleBuffer.position(), 6, + RpcClient.REGISTRY_PREAMBLE_HEADER, 0, 6) && doConnectionRegistryResponse() + ) { + return PreambleResponse.CLOSE; + } + if (!ByteBufferUtils.equals(preambleBuffer, preambleBuffer.position(), 4, RPC_HEADER, 0, 4)) { + doBadPreambleHandling( + "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + + Bytes.toStringBinary( + ByteBufferUtils.toBytes(preambleBuffer, preambleBuffer.position(), RPC_HEADER.length), + 0, RPC_HEADER.length) + + " from " + toString()); + return PreambleResponse.CLOSE; + } + int version = preambleBuffer.get(preambleBuffer.position() + 4) & 0xFF; + byte authByte = preambleBuffer.get(preambleBuffer.position() + 5); if (version != RpcServer.CURRENT_VERSION) { - String msg = getFatalConnectionString(version, authbyte); + String msg = getFatalConnectionString(version, authByte); doBadPreambleHandling(msg, new WrongVersionException(msg)); - return false; + return PreambleResponse.CLOSE; } - this.provider = this.saslProviders.selectProvider(authbyte); + + this.provider = this.saslProviders.selectProvider(authByte); 
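// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: processPreamble() above dispatches on a fixed
// 6-byte preamble -- 4 magic bytes, 1 version byte, 1 auth-code byte -- and now reads it
// with absolute gets so the buffer position is left untouched. A minimal, self-contained
// illustration of that layout; the class and method names here are hypothetical.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class PreambleLayoutSketch {
  /** Returns a human-readable view of a 6-byte preamble without consuming the buffer. */
  static String describe(ByteBuffer preamble) {
    if (preamble.remaining() != 6) {
      throw new IllegalArgumentException("expected exactly 6 bytes");
    }
    int base = preamble.position();
    byte[] magic = { preamble.get(base), preamble.get(base + 1), preamble.get(base + 2),
      preamble.get(base + 3) };
    int version = preamble.get(base + 4) & 0xFF; // treat the version byte as unsigned
    byte authCode = preamble.get(base + 5);
    return new String(magic, StandardCharsets.US_ASCII) + " version=" + version + " auth="
      + authCode;
  }
}
// e.g. describe(ByteBuffer.wrap(new byte[] { 'H', 'B', 'a', 's', 0, 0 }))
//      -> "HBas version=0 auth=0"  (assuming RPC_HEADER is the ASCII bytes "HBas")
// ---------------------------------------------------------------------------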
if (this.provider == null) { - String msg = getFatalConnectionString(version, authbyte); + String msg = getFatalConnectionString(version, authByte); doBadPreambleHandling(msg, new BadAuthException(msg)); - return false; + return PreambleResponse.CLOSE; } // TODO this is a wart while simple auth'n doesn't go through sasl. if (this.rpcServer.isSecurityEnabled && isSimpleAuthentication()) { @@ -732,7 +769,7 @@ protected final boolean processPreamble(ByteBuffer preambleBuffer) throws IOExce } else { AccessDeniedException ae = new AccessDeniedException("Authentication is required"); doRespond(getErrorResponse(ae.getMessage(), ae)); - return false; + return PreambleResponse.CLOSE; } } if (!this.rpcServer.isSecurityEnabled && !isSimpleAuthentication()) { @@ -745,7 +782,7 @@ protected final boolean processPreamble(ByteBuffer preambleBuffer) throws IOExce skipInitialSaslHandshake = true; } useSasl = !(provider instanceof SimpleSaslServerAuthenticationProvider); - return true; + return PreambleResponse.SUCCEED; } boolean isSimpleAuthentication() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index ac705d7a26fa..9e90a7a31339 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -137,12 +137,21 @@ private int readPreamble() throws IOException { return count; } preambleBuffer.flip(); - if (!processPreamble(preambleBuffer)) { - return -1; + PreambleResponse resp = processPreamble(preambleBuffer); + switch (resp) { + case SUCCEED: + preambleBuffer = null; // do not need it anymore + connectionPreambleRead = true; + return count; + case CONTINUE: + // wait for the next preamble header + preambleBuffer.reset(); + return count; + case CLOSE: + return -1; + default: + throw new IllegalArgumentException("Unknown preamble response: " + resp); } - preambleBuffer = null; // do not need it anymore - connectionPreambleRead = true; - return count; } private int read4Bytes() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index d001ea755b74..cbfde9c7e172 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -50,9 +50,9 @@ import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.ConcurrentMapUtils.IOExceptionSupplier; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.IOExceptionSupplier; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index a94c214a3250..509d74e0335c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -40,7 +40,8 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.DummyConnectionRegistry; +import org.apache.hadoop.hbase.client.ConnectionRegistry; +import org.apache.hadoop.hbase.client.DoNothingConnectionRegistry; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.DirScanPool; @@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; @@ -92,9 +94,10 @@ public class TestZooKeeperTableArchiveClient { private static RegionServerServices rss; private static DirScanPool POOL; - public static final class MockRegistry extends DummyConnectionRegistry { + public static final class MockRegistry extends DoNothingConnectionRegistry { - public MockRegistry(Configuration conf) { + public MockRegistry(Configuration conf, User user) { + super(conf, user); } @Override @@ -110,8 +113,8 @@ public CompletableFuture getClusterId() { public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniZKCluster(); - UTIL.getConfiguration().setClass(MockRegistry.REGISTRY_IMPL_CONF_KEY, MockRegistry.class, - DummyConnectionRegistry.class); + UTIL.getConfiguration().setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + MockRegistry.class, ConnectionRegistry.class); CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration()); archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION); // make hfile archiving node so we can archive files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 8b38db974d7c..0ff105743e0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; @@ -59,7 +60,7 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.getAdmin().createTable(td, SPLIT_KEYS); UTIL.waitTableAvailable(TABLE_NAME); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java deleted 
file mode 100644 index cc2e9493d039..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.concurrent.CompletableFuture; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; - -/** - * Can be overridden in UT if you only want to implement part of the methods in - * {@link ConnectionRegistry}. - */ -public class DummyConnectionRegistry implements ConnectionRegistry { - - public static final String REGISTRY_IMPL_CONF_KEY = - HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; - - @Override - public CompletableFuture getMetaRegionLocations() { - return null; - } - - @Override - public CompletableFuture getClusterId() { - return null; - } - - @Override - public CompletableFuture getActiveMaster() { - return null; - } - - @Override - public String getConnectionString() { - return null; - } - - @Override - public void close() { - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 4dd4d4550777..da400f29c0c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -55,7 +56,7 @@ public static void setUpBeforeClass() throws Exception { TestAsyncAdminBase.setUpBeforeClass(); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index ad9bf551c033..90d2cb51e8cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.RegionReplicaTestHelper.Locator; import org.apache.hadoop.hbase.client.trace.StringTraceRenderer; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.trace.OpenTelemetryClassRule; @@ -106,7 +107,8 @@ protected void before() throws Throwable { testUtil = miniClusterRule.getTestingUtility(); HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); testUtil.waitUntilNoRegionsInTransition(); - registry = ConnectionRegistryFactory.getRegistry(testUtil.getConfiguration()); + registry = + ConnectionRegistryFactory.getRegistry(testUtil.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(testUtil, registry); admin.balancerSwitch(false).get(); locator = new AsyncMetaRegionLocator(registry); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index f3b231d5bde8..a6d0ab81f912 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -128,7 +128,7 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I // Enable meta replica LoadBalance mode for this connection. c.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, metaReplicaMode.toString()); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); conn = new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent()); locator = new AsyncNonMetaRegionLocator(conn); @@ -147,7 +147,7 @@ public void tearDownAfterTest() throws IOException { } @Parameterized.Parameters - public static Collection parameters() { + public static Collection paramAbstractTestRegionLocatoreters() { return Arrays .asList(new Object[][] { { CatalogReplicaMode.NONE }, { CatalogReplicaMode.LOAD_BALANCE } }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 2dd08b36b30d..50c9ab9f5657 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -125,7 +125,7 @@ public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); TEST_UTIL.getAdmin().balancerSwitch(false, true); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = new AsyncNonMetaRegionLocator(CONN); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index ee0963e1f8b3..bacd7bb32d70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -100,7 +100,7 @@ public static void setUp() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = CONN.getLocator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index ab43ec545d93..3c8327145f32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -73,7 +73,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 1a7ac8819e49..0de59a4c32bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -94,7 +95,8 @@ public static void setUp() throws Exception { FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); - try (ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf)) { + try (ConnectionRegistry registry = + ConnectionRegistryFactory.getRegistry(conf, User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java index d5b0ee18e594..549575f4f404 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager; +import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.junit.AfterClass; @@ -65,7 +66,7 @@ public static void setUpBeforeClass() throws Exception { conf.setLong(RpcConnectionRegistry.PERIODIC_REFRESH_INTERVAL_SECS, 1); conf.setLong(RpcConnectionRegistry.MIN_SECS_BETWEEN_REFRESHES, 1); UTIL.startMiniCluster(3); - REGISTRY = new RpcConnectionRegistry(conf); + REGISTRY = new RpcConnectionRegistry(conf, UserProvider.instantiate(conf).getCurrent()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 0e2feae841cf..5c78e53f7e60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -77,7 +77,8 @@ public static void setUp() throws Exception { () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() >= numOfMetaReplica); - registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + registry = + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), null, User.getCurrent()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index c9238bc99978..d79603cea3cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; @@ -85,45 +85,39 @@ private static String generateDummyMastersList(int size) { * Makes sure the master registry parses the master end points in the configuration correctly. */ @Test - public void testMasterAddressParsing() throws IOException { + public void testMasterAddressParsing() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); int numMasters = 10; conf.set(HConstants.MASTER_ADDRS_KEY, generateDummyMastersList(numMasters)); - try (MasterRegistry registry = new MasterRegistry(conf)) { - List parsedMasters = new ArrayList<>(registry.getParsedServers()); - // Half of them would be without a port, duplicates are removed. 
- assertEquals(numMasters / 2 + 1, parsedMasters.size()); - // Sort in the increasing order of port numbers. - Collections.sort(parsedMasters, Comparator.comparingInt(ServerName::getPort)); - for (int i = 0; i < parsedMasters.size(); i++) { - ServerName sn = parsedMasters.get(i); - assertEquals("localhost", sn.getHostname()); - if (i == parsedMasters.size() - 1) { - // Last entry should be the one with default port. - assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort()); - } else { - assertEquals(1000 + (2 * i), sn.getPort()); - } + List parsedMasters = new ArrayList<>(MasterRegistry.parseMasterAddrs(conf)); + // Half of them would be without a port, duplicates are removed. + assertEquals(numMasters / 2 + 1, parsedMasters.size()); + // Sort in the increasing order of port numbers. + Collections.sort(parsedMasters, Comparator.comparingInt(ServerName::getPort)); + for (int i = 0; i < parsedMasters.size(); i++) { + ServerName sn = parsedMasters.get(i); + assertEquals("localhost", sn.getHostname()); + if (i == parsedMasters.size() - 1) { + // Last entry should be the one with default port. + assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort()); + } else { + assertEquals(1000 + (2 * i), sn.getPort()); } } } @Test - public void testMasterPortDefaults() throws IOException { + public void testMasterPortDefaults() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.set(HConstants.MASTER_ADDRS_KEY, "localhost"); - try (MasterRegistry registry = new MasterRegistry(conf)) { - List parsedMasters = new ArrayList<>(registry.getParsedServers()); - ServerName sn = parsedMasters.get(0); - assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort()); - } + List parsedMasters = new ArrayList<>(MasterRegistry.parseMasterAddrs(conf)); + ServerName sn = parsedMasters.get(0); + assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort()); final int CUSTOM_MASTER_PORT = 9999; conf.setInt(HConstants.MASTER_PORT, CUSTOM_MASTER_PORT); - try (MasterRegistry registry = new MasterRegistry(conf)) { - List parsedMasters = new ArrayList<>(registry.getParsedServers()); - ServerName sn = parsedMasters.get(0); - assertEquals(CUSTOM_MASTER_PORT, sn.getPort()); - } + parsedMasters = new ArrayList<>(MasterRegistry.parseMasterAddrs(conf)); + sn = parsedMasters.get(0); + assertEquals(CUSTOM_MASTER_PORT, sn.getPort()); } @Test @@ -133,7 +127,7 @@ public void testRegistryRPCs() throws Exception { final int size = activeMaster.getMetaLocations().size(); for (int numHedgedReqs = 1; numHedgedReqs <= size; numHedgedReqs++) { conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, numHedgedReqs); - try (MasterRegistry registry = new MasterRegistry(conf)) { + try (MasterRegistry registry = new MasterRegistry(conf, User.getCurrent())) { // Add wait on all replicas being assigned before proceeding w/ test. Failed on occasion // because not all replicas had made it up before test started. RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); @@ -166,7 +160,7 @@ public void testDynamicMasterConfigurationRefresh() throws Exception { conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, 4); // Do not limit the number of refreshes during the test run. 
conf.setLong(MasterRegistry.MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES, 0); - try (MasterRegistry registry = new MasterRegistry(conf)) { + try (MasterRegistry registry = new MasterRegistry(conf, User.getCurrent())) { final Set masters = registry.getParsedServers(); assertTrue(masters.contains(badServer)); // Make a registry RPC, this should trigger a refresh since one of the hedged RPC fails. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index d78832d9a8a0..beb054eaf366 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.JVMClusterUtil; @@ -63,7 +64,8 @@ public class TestMetaRegionLocationCache { public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); - REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + REGISTRY = + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java index 9c26bccbbb31..d33cc943355c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java @@ -17,22 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasSize; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager; import org.apache.hadoop.hbase.regionserver.RSRpcServices; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; @@ -43,6 +50,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; 
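// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the TestMasterRegistry assertions a few hunks
// above exercise MasterRegistry.parseMasterAddrs -- entries in a comma-separated
// "host[:port]" list get the default master port when none is given, and duplicates are
// dropped. A rough, self-contained illustration of that behaviour; the names below are
// hypothetical stand-ins, not the real HBase API.
import java.util.LinkedHashSet;
import java.util.Set;

final class MasterAddrParseSketch {
  static Set<String> parse(String csv, int defaultPort) {
    Set<String> out = new LinkedHashSet<>(); // set semantics drop duplicate entries, order kept
    for (String part : csv.split(",")) {
      String hostPort = part.trim();
      if (hostPort.isEmpty()) {
        continue;
      }
      if (!hostPort.contains(":")) {
        hostPort = hostPort + ":" + defaultPort; // no port given, apply the default
      }
      out.add(hostPort);
    }
    return out;
  }
}
// e.g. parse("localhost:1000, localhost, localhost:1002, localhost", 16000)
//      -> [localhost:1000, localhost:16000, localhost:1002]
// ---------------------------------------------------------------------------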
import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ MediumTests.class, ClientTests.class }) @@ -74,7 +82,7 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws IOException { - registry = new RpcConnectionRegistry(UTIL.getConfiguration()); + registry = new RpcConnectionRegistry(UTIL.getConfiguration(), User.getCurrent()); } @After @@ -94,9 +102,20 @@ private void setMaxNodeCount(int count) { @Test public void testRegistryRPCs() throws Exception { HMaster activeMaster = UTIL.getHBaseCluster().getMaster(); - // sleep 3 seconds, since our initial delay is 1 second, we should have refreshed the endpoints - Thread.sleep(3000); - assertThat(registry.getParsedServers(), + // should only contains the active master + Set initialParsedServers = registry.getParsedServers(); + assertThat(initialParsedServers, hasSize(1)); + // no start code in configuration + assertThat(initialParsedServers, + hasItem(ServerName.valueOf(activeMaster.getServerName().getHostname(), + activeMaster.getServerName().getPort(), -1))); + // Since our initial delay is 1 second, finally we should have refreshed the endpoints + UTIL.waitFor(5000, () -> registry.getParsedServers() + .contains(activeMaster.getServerManager().getOnlineServersList().get(0))); + Set parsedServers = registry.getParsedServers(); + assertThat(parsedServers, + hasSize(activeMaster.getServerManager().getOnlineServersList().size())); + assertThat(parsedServers, hasItems(activeMaster.getServerManager().getOnlineServersList().toArray(new ServerName[0]))); // Add wait on all replicas being assigned before proceeding w/ test. Failed on occasion @@ -116,4 +135,32 @@ public void testRegistryRPCs() throws Exception { setMaxNodeCount(1); UTIL.waitFor(10000, () -> registry.getParsedServers().size() == 1); } + + /** + * Make sure that we can create the RpcClient when there are broken servers in the bootstrap nodes + */ + @Test + public void testBrokenBootstrapNodes() throws Exception { + Configuration conf = new Configuration(UTIL.getConfiguration()); + String currentMasterAddrs = Preconditions.checkNotNull(conf.get(HConstants.MASTER_ADDRS_KEY)); + HMaster activeMaster = UTIL.getHBaseCluster().getMaster(); + String clusterId = activeMaster.getClusterId(); + // Add a non-working master + ServerName badServer = ServerName.valueOf("localhost", 1234, -1); + conf.set(RpcConnectionRegistry.BOOTSTRAP_NODES, badServer.toShortString()); + // only a bad server, the request should fail + try (RpcConnectionRegistry reg = new RpcConnectionRegistry(conf, User.getCurrent())) { + assertThrows(IOException.class, () -> reg.getParsedServers()); + } + + conf.set(RpcConnectionRegistry.BOOTSTRAP_NODES, + badServer.toShortString() + ", " + currentMasterAddrs); + // we will choose bootstrap node randomly so here we need to test it multiple times to make sure + // that we can skip the broken node + for (int i = 0; i < 10; i++) { + try (RpcConnectionRegistry reg = new RpcConnectionRegistry(conf, User.getCurrent())) { + assertEquals(clusterId, reg.getClusterId().get()); + } + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 1cbb36196684..6d585245e959 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -65,7 +65,7 @@ public 
class TestZKConnectionRegistry { public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); - REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration()); + REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration(), null); } @AfterClass @@ -99,7 +99,7 @@ public void testIndependentZKConnections() throws IOException { try (ReadOnlyZKClient zk1 = REGISTRY.getZKClient()) { Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration()); otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); - try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) { + try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf, null)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); assertNotSame("Using a different configuration / quorum should result in different " + "backing zk connection.", zk1, zk2); @@ -116,7 +116,7 @@ public void testIndependentZKConnections() throws IOException { public void testNoMetaAvailable() throws InterruptedException { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.set("zookeeper.znode.metaserver", "whatever"); - try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) { + try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf, null)) { try { registry.getMetaRegionLocations().get(); fail("Should have failed since we set an incorrect meta znode prefix"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index a93f54d4d9d1..e4427c1690c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -42,8 +43,10 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static org.mockito.internal.verification.VerificationModeFactory.times; import io.opentelemetry.api.common.AttributeKey; @@ -63,12 +66,18 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseServerBase; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MatcherPredicate; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.hamcrest.Matcher; import org.junit.Rule; @@ -78,6 +87,8 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto; @@ -88,6 +99,9 @@ import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.Interface; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ConnectionRegistryService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse; /** * Some basic ipc tests. @@ -105,9 +119,14 @@ public abstract class AbstractTestIPC { CONF.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, SimpleRpcServer.class.getName()); } - protected abstract RpcServer createRpcServer(final String name, - final List services, final InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException; + protected abstract RpcServer createRpcServer(Server server, String name, + List services, InetSocketAddress bindAddress, Configuration conf, + RpcScheduler scheduler) throws IOException; + + private RpcServer createRpcServer(String name, List services, + InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { + return createRpcServer(null, name, services, bindAddress, conf, scheduler); + } protected abstract AbstractRpcClient createRpcClientNoCodec(Configuration conf); @@ -568,4 +587,62 @@ public void testBadPreambleHeader() throws IOException, ServiceException { rpcServer.stop(); } } + + /** + * Testcase for getting connection registry information through connection preamble header, see + * HBASE-25051 for more details. + */ + @Test + public void testGetConnectionRegistry() throws IOException, ServiceException { + Configuration clientConf = new Configuration(CONF); + String clusterId = "test_cluster_id"; + HBaseServerBase server = mock(HBaseServerBase.class); + when(server.getClusterId()).thenReturn(clusterId); + // do not need any services + RpcServer rpcServer = createRpcServer(server, "testRpcServer", Collections.emptyList(), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); + try (AbstractRpcClient client = createRpcClient(clientConf)) { + rpcServer.start(); + InetSocketAddress addr = rpcServer.getListenerAddress(); + BlockingRpcChannel channel = + client.createBlockingRpcChannel(ServerName.valueOf(addr.getHostName(), addr.getPort(), + EnvironmentEdgeManager.currentTime()), User.getCurrent(), 0); + ConnectionRegistryService.BlockingInterface stub = + ConnectionRegistryService.newBlockingStub(channel); + GetConnectionRegistryResponse resp = + stub.getConnectionRegistry(null, GetConnectionRegistryRequest.getDefaultInstance()); + assertEquals(clusterId, resp.getClusterId()); + } + } + + /** + * Test server does not support getting connection registry information through connection + * preamble header, i.e, a new client connecting to an old server. 
We simulate this by using a + * Server without implementing the ConnectionRegistryEndpoint interface. + */ + @Test + public void testGetConnectionRegistryError() throws IOException, ServiceException { + Configuration clientConf = new Configuration(CONF); + // do not need any services + RpcServer rpcServer = createRpcServer("testRpcServer", Collections.emptyList(), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); + try (AbstractRpcClient client = createRpcClient(clientConf)) { + rpcServer.start(); + InetSocketAddress addr = rpcServer.getListenerAddress(); + RpcChannel channel = client.createRpcChannel(ServerName.valueOf(addr.getHostName(), + addr.getPort(), EnvironmentEdgeManager.currentTime()), User.getCurrent(), 0); + ConnectionRegistryService.Interface stub = ConnectionRegistryService.newStub(channel); + HBaseRpcController pcrc = new HBaseRpcControllerImpl(); + BlockingRpcCallback done = new BlockingRpcCallback<>(); + stub.getConnectionRegistry(pcrc, GetConnectionRegistryRequest.getDefaultInstance(), done); + // should have failed so no response + assertNull(done.get()); + assertTrue(pcrc.failed()); + // should be a FatalConnectionException + assertThat(pcrc.getFailed(), instanceOf(RemoteException.class)); + assertEquals(FatalConnectionException.class.getName(), + ((RemoteException) pcrc.getFailed()).getClassName()); + assertThat(pcrc.getFailed().getMessage(), startsWith("Expected HEADER=")); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java index 9544e8c35458..e60cc879fd4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java @@ -40,10 +40,10 @@ public class TestBlockingIPC extends AbstractTestIPC { HBaseClassTestRule.forClass(TestBlockingIPC.class); @Override - protected RpcServer createRpcServer(String name, + protected RpcServer createRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - return RpcServerFactory.createRpcServer(null, name, services, bindAddress, conf, scheduler); + return RpcServerFactory.createRpcServer(server, name, services, bindAddress, conf, scheduler); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java index 6feab5f2cac8..a1b60e2cfa45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java @@ -24,6 +24,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; @@ -103,10 +104,10 @@ private void setConf(Configuration conf) { } @Override - protected RpcServer createRpcServer(String name, + protected RpcServer createRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - return new NettyRpcServer(null, name, services, bindAddress, conf, scheduler, true); + return new NettyRpcServer(server, name, services, 
bindAddress, conf, scheduler, true); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java index 4c654123e130..1cbf6be26c65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseServerBase; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.client.ConnectionRegistryEndpoint; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.crypto.tls.KeyStoreFileType; import org.apache.hadoop.hbase.io.crypto.tls.X509KeyType; @@ -65,8 +67,6 @@ public class TestNettyTlsIPC extends AbstractTestIPC { private static NettyEventLoopGroupConfig EVENT_LOOP_GROUP_CONFIG; - private static HBaseServerBase SERVER; - @Parameterized.Parameter(0) public X509KeyType caKeyType; @@ -115,8 +115,6 @@ public static void setUpBeforeClass() throws IOException { PROVIDER = new X509TestContextProvider(CONF, dir); EVENT_LOOP_GROUP_CONFIG = NettyEventLoopGroupConfig.setup(CONF, TestNettyTlsIPC.class.getSimpleName()); - SERVER = mock(HBaseServerBase.class); - when(SERVER.getEventLoopGroupConfig()).thenReturn(EVENT_LOOP_GROUP_CONFIG); } @AfterClass @@ -147,9 +145,16 @@ public void tearDown() { } @Override - protected RpcServer createRpcServer(String name, List services, - InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - return new NettyRpcServer(SERVER, name, services, bindAddress, conf, scheduler, true); + protected RpcServer createRpcServer(Server server, String name, + List services, InetSocketAddress bindAddress, Configuration conf, + RpcScheduler scheduler) throws IOException { + HBaseServerBase mockServer = mock(HBaseServerBase.class); + when(mockServer.getEventLoopGroupConfig()).thenReturn(EVENT_LOOP_GROUP_CONFIG); + if (server instanceof ConnectionRegistryEndpoint) { + String clusterId = ((ConnectionRegistryEndpoint) server).getClusterId(); + when(mockServer.getClusterId()).thenReturn(clusterId); + } + return new NettyRpcServer(mockServer, name, services, bindAddress, conf, scheduler, true); } @Override @@ -184,7 +189,9 @@ protected boolean isTcpNoDelay() { protected RpcServer createTestFailingRpcServer(String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - return new FailingNettyRpcServer(SERVER, name, services, bindAddress, conf, scheduler); + HBaseServerBase mockServer = mock(HBaseServerBase.class); + when(mockServer.getEventLoopGroupConfig()).thenReturn(EVENT_LOOP_GROUP_CONFIG); + return new FailingNettyRpcServer(mockServer, name, services, bindAddress, conf, scheduler); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java index 93fa22c00fd3..d6c7a0250015 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java @@ -34,15 +34,17 @@ import org.apache.hadoop.hbase.CellScanner; import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistry; +import org.apache.hadoop.hbase.client.DoNothingConnectionRegistry; import org.apache.hadoop.hbase.client.DummyAsyncClusterConnection; import org.apache.hadoop.hbase.client.DummyAsyncTable; -import org.apache.hadoop.hbase.client.DummyConnectionRegistry; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -84,8 +86,8 @@ public class TestWALEntrySinkFilter { public void testWALEntryFilter() throws IOException { Configuration conf = HBaseConfiguration.create(); // Make it so our filter is instantiated on construction of ReplicationSink. - conf.setClass(DummyConnectionRegistry.REGISTRY_IMPL_CONF_KEY, DevNullConnectionRegistry.class, - DummyConnectionRegistry.class); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + DevNullConnectionRegistry.class, ConnectionRegistry.class); conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class); conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL, @@ -166,9 +168,10 @@ public boolean filter(TableName table, long writeTime) { } } - public static class DevNullConnectionRegistry extends DummyConnectionRegistry { + public static class DevNullConnectionRegistry extends DoNothingConnectionRegistry { - public DevNullConnectionRegistry(Configuration conf) { + public DevNullConnectionRegistry(Configuration conf, User user) { + super(conf, user); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index 16ac215acaff..f132eb6964b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -21,22 +21,26 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Collection; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.NettyRpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.security.User; 
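// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: in TestNettyTlsIPC above, createRpcServer and
// createTestFailingRpcServer now build the same HBaseServerBase mock inline; one way to
// avoid that repetition would be a small helper along these lines. The helper name is
// hypothetical and it assumes the test class's existing fields and static Mockito imports;
// the wiring itself only repeats what the diff already does.
private static HBaseServerBase mockServer(Server server) {
  HBaseServerBase mockServer = mock(HBaseServerBase.class);
  when(mockServer.getEventLoopGroupConfig()).thenReturn(EVENT_LOOP_GROUP_CONFIG);
  if (server instanceof ConnectionRegistryEndpoint) {
    // expose the cluster id so the connection-registry preamble test sees the expected value
    when(mockServer.getClusterId())
      .thenReturn(((ConnectionRegistryEndpoint) server).getClusterId());
  }
  return mockServer;
}
// ---------------------------------------------------------------------------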
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.Token; @@ -51,11 +55,9 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.AuthenticationService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.WhoAmIRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.WhoAmIResponse; @@ -92,24 +94,58 @@ public void setUpBeforeMethod() { rpcClientImpl); } - @Test - public void test() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table table = conn.getTable(TableName.META_TABLE_NAME)) { - CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW); - AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance()); + private void testToken() throws Exception { + try (AsyncConnection conn = + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); + WhoAmIResponse response = + table. coprocessorService( + AuthenticationService::newStub, + (s, c, r) -> s.whoAmI(c, WhoAmIRequest.getDefaultInstance(), r), + HConstants.EMPTY_START_ROW).get(); assertEquals(USERNAME, response.getUsername()); assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod()); - try { - service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance()); - } catch (ServiceException e) { - IOException ioe = ProtobufUtil.getRemoteException(e); - assertThat(ioe, instanceOf(AccessDeniedException.class)); - assertThat(ioe.getMessage(), - containsString("Token generation only allowed for Kerberos authenticated clients")); - } + IOException ioe = + assertThrows(IOException.class, + () -> FutureUtils.get(table. 
coprocessorService(AuthenticationService::newStub, + (s, c, r) -> s.getAuthenticationToken(c, + GetAuthenticationTokenRequest.getDefaultInstance(), r), + HConstants.EMPTY_START_ROW))); + assertThat(ioe, instanceOf(AccessDeniedException.class)); + assertThat(ioe.getMessage(), + containsString("Token generation only allowed for Kerberos authenticated clients")); } + + } + + /** + * Confirm that we will use delegation token first if token and kerberos tickets are both present + */ + @Test + public void testTokenFirst() throws Exception { + testToken(); + } + + /** + * Confirm that we can connect to cluster successfully when there is only token present, i.e, no + * kerberos ticket + */ + @Test + public void testOnlyToken() throws Exception { + User user = + User.createUserForTesting(TEST_UTIL.getConfiguration(), "no_krb_user", new String[0]); + for (Token token : User.getCurrent().getUGI().getCredentials() + .getAllTokens()) { + user.getUGI().addToken(token); + } + user.getUGI().doAs(new PrivilegedExceptionAction() { + + @Override + public Void run() throws Exception { + testToken(); + return null; + } + }); } } From afbf2adba25c24ce6b895865005367563efc7cb4 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Tue, 6 Feb 2024 15:28:05 -0500 Subject: [PATCH 241/514] HBASE-27687 Enhance quotas to consume blockBytesScanned rather than response size (#5654) Signed-off-by: Bryan Beaudreault --- .../hbase/quotas/DefaultOperationQuota.java | 34 ++- .../hbase/quotas/ExceedOperationQuota.java | 6 +- .../hadoop/hbase/quotas/OperationQuota.java | 10 + .../quotas/RegionServerRpcQuotaManager.java | 20 +- .../hadoop/hbase/regionserver/HRegion.java | 10 + .../hadoop/hbase/regionserver/Region.java | 6 + .../quotas/TestBlockBytesScannedQuota.java | 233 ++++++++++++++++++ .../hbase/quotas/ThrottleQuotaTestUtil.java | 63 +++++ 8 files changed, 366 insertions(+), 16 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index ddf804243ed8..4b89e18a8021 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -22,6 +22,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -49,9 +51,15 @@ public class DefaultOperationQuota implements OperationQuota { protected long readDiff = 0; protected long writeCapacityUnitDiff = 0; protected long readCapacityUnitDiff = 0; + private boolean useResultSizeBytes; + private long blockSizeBytes; - public DefaultOperationQuota(final Configuration conf, final QuotaLimiter... limiters) { + public DefaultOperationQuota(final Configuration conf, final int blockSizeBytes, + final QuotaLimiter... 
limiters) { this(conf, Arrays.asList(limiters)); + this.useResultSizeBytes = + conf.getBoolean(OperationQuota.USE_RESULT_SIZE_BYTES, USE_RESULT_SIZE_BYTES_DEFAULT); + this.blockSizeBytes = blockSizeBytes; } /** @@ -94,8 +102,17 @@ public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThro public void close() { // Adjust the quota consumed for the specified operation writeDiff = operationSize[OperationType.MUTATE.ordinal()] - writeConsumed; - readDiff = operationSize[OperationType.GET.ordinal()] - + operationSize[OperationType.SCAN.ordinal()] - readConsumed; + + long resultSize = + operationSize[OperationType.GET.ordinal()] + operationSize[OperationType.SCAN.ordinal()]; + if (useResultSizeBytes) { + readDiff = resultSize - readConsumed; + } else { + long blockBytesScanned = + RpcServer.getCurrentCall().map(RpcCall::getBlockBytesScanned).orElse(0L); + readDiff = Math.max(blockBytesScanned, resultSize) - readConsumed; + } + writeCapacityUnitDiff = calculateWriteCapacityUnitDiff(operationSize[OperationType.MUTATE.ordinal()], writeConsumed); readCapacityUnitDiff = calculateReadCapacityUnitDiff( @@ -140,8 +157,15 @@ public void addMutation(final Mutation mutation) { */ protected void updateEstimateConsumeQuota(int numWrites, int numReads, int numScans) { writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100); - readConsumed = estimateConsume(OperationType.GET, numReads, 100); - readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000); + + if (useResultSizeBytes) { + readConsumed = estimateConsume(OperationType.GET, numReads, 100); + readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000); + } else { + // assume 1 block required for reads. this is probably a low estimate, which is okay + readConsumed = numReads > 0 ? blockSizeBytes : 0; + readConsumed += numScans > 0 ? blockSizeBytes : 0; + } writeCapacityUnitConsumed = calculateWriteCapacityUnit(writeConsumed); readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java index 1b7200f5f22f..1788e550f22a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java @@ -40,9 +40,9 @@ public class ExceedOperationQuota extends DefaultOperationQuota { private static final Logger LOG = LoggerFactory.getLogger(ExceedOperationQuota.class); private QuotaLimiter regionServerLimiter; - public ExceedOperationQuota(final Configuration conf, QuotaLimiter regionServerLimiter, - final QuotaLimiter... limiters) { - super(conf, limiters); + public ExceedOperationQuota(final Configuration conf, int blockSizeBytes, + QuotaLimiter regionServerLimiter, final QuotaLimiter... 
limiters) { + super(conf, blockSizeBytes, limiters); this.regionServerLimiter = regionServerLimiter; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java index aaae64b6184a..e18d3eb34953 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java @@ -35,6 +35,16 @@ public enum OperationType { SCAN } + /** + * If false, the default, then IO based throttles will consume read availability based on the + * block bytes scanned by the given request. If true then IO based throttles will use result size + * rather than block bytes scanned. Using block bytes scanned should be preferable to using result + * size, because otherwise access patterns like heavily filtered scans may be able to produce a + * significant and effectively un-throttled workload. + */ + String USE_RESULT_SIZE_BYTES = "hbase.quota.use.result.size.bytes"; + boolean USE_RESULT_SIZE_BYTES_DEFAULT = false; + /** * Checks if it is possible to execute the specified operation. The quota will be estimated based * on the number of operations to perform and the average size accumulated during time. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 4b09c0308f9e..de76303e27ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Optional; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.ipc.RpcScheduler; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.regionserver.Region; @@ -113,7 +114,8 @@ QuotaCache getQuotaCache() { * @param table the table where the operation will be executed * @return the OperationQuota */ - public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table) { + public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table, + final int blockSizeBytes) { if (isQuotaEnabled() && !table.isSystemTable() && isRpcThrottleEnabled()) { UserQuotaState userQuotaState = quotaCache.getUserQuotaState(ugi); QuotaLimiter userLimiter = userQuotaState.getTableLimiter(table); @@ -123,7 +125,8 @@ public OperationQuota getQuota(final UserGroupInformation ugi, final TableName t LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" + userLimiter); } if (!useNoop) { - return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter); + return new DefaultOperationQuota(this.rsServices.getConfiguration(), blockSizeBytes, + userLimiter); } } else { QuotaLimiter nsLimiter = quotaCache.getNamespaceLimiter(table.getNamespaceAsString()); @@ -139,11 +142,11 @@ public OperationQuota getQuota(final UserGroupInformation ugi, final TableName t } if (!useNoop) { if (exceedThrottleQuotaEnabled) { - return new ExceedOperationQuota(this.rsServices.getConfiguration(), rsLimiter, - userLimiter, tableLimiter, nsLimiter); + return new ExceedOperationQuota(this.rsServices.getConfiguration(), blockSizeBytes, + rsLimiter, userLimiter, tableLimiter, nsLimiter); } else { - return new 
DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter, - tableLimiter, nsLimiter, rsLimiter); + return new DefaultOperationQuota(this.rsServices.getConfiguration(), blockSizeBytes, + userLimiter, tableLimiter, nsLimiter, rsLimiter); } } } @@ -213,9 +216,10 @@ private OperationQuota checkQuota(final Region region, final int numWrites, fina } else { ugi = User.getCurrent().getUGI(); } - TableName table = region.getTableDescriptor().getTableName(); + TableDescriptor tableDescriptor = region.getTableDescriptor(); + TableName table = tableDescriptor.getTableName(); - OperationQuota quota = getQuota(ugi, table); + OperationQuota quota = getQuota(ugi, table, region.getMinBlockSizeBytes()); try { quota.checkQuota(numWrites, numReads, numScans); } catch (RpcThrottlingException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 0dc96747dd36..ae4045b1216b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -458,6 +458,8 @@ public MetricsTableRequests getMetricsTableRequests() { private final CellComparator cellComparator; + private final int minBlockSizeBytes; + /** * @return The smallest mvcc readPoint across all the scanners in this region. Writes older than * this readPoint, are included in every read operation. @@ -916,6 +918,9 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co .remove(getRegionInfo().getEncodedName()); } } + + minBlockSizeBytes = Arrays.stream(this.htableDescriptor.getColumnFamilies()) + .mapToInt(ColumnFamilyDescriptor::getBlocksize).min().orElse(HConstants.DEFAULT_BLOCKSIZE); } private void setHTableSpecificConf() { @@ -2047,6 +2052,11 @@ public Configuration getReadOnlyConfiguration() { return new ReadOnlyConfiguration(this.conf); } + @Override + public int getMinBlockSizeBytes() { + return minBlockSizeBytes; + } + private ThreadPoolExecutor getStoreOpenAndCloseThreadPool(final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.min(numStores, conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 6a897a5b9f36..42069e58092e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -571,4 +571,10 @@ void requestCompaction(byte[] family, String why, int priority, boolean major, * if you try to set a configuration. */ Configuration getReadOnlyConfiguration(); + + /** + * The minimum block size configuration from all relevant column families. This is used when + * estimating quota consumption. + */ + int getMinBlockSizeBytes(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java new file mode 100644 index 000000000000..e27ba123381c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doMultiGets; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doScans; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.triggerUserCacheRefresh; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.waitMinuteQuota; + +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestBlockBytesScannedQuota { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBlockBytesScannedQuota.class); + + private final static Logger LOG = LoggerFactory.getLogger(TestBlockBytesScannedQuota.class); + + private static final int REFRESH_TIME = 5000; + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final byte[] FAMILY = Bytes.toBytes("cf"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); + + private static final TableName TABLE_NAME = TableName.valueOf("BlockBytesScannedQuotaTest"); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // client should fail fast + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 10); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + + // quotas enabled, using block bytes scanned + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME); + + // don't cache blocks to make IO predictable + TEST_UTIL.getConfiguration().setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); + + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + TEST_UTIL.createTable(TABLE_NAME, FAMILY); + 
TEST_UTIL.waitTableAvailable(TABLE_NAME); + QuotaCache.TEST_FORCE_REFRESH = true; + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + EnvironmentEdgeManager.reset(); + TEST_UTIL.deleteTable(TABLE_NAME); + TEST_UTIL.shutdownMiniCluster(); + } + + @After + public void tearDown() throws Exception { + ThrottleQuotaTestUtil.clearQuotaCache(TEST_UTIL); + } + + @Test + public void testBBSGet() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final String userName = User.getCurrent().getShortName(); + int blockSize = admin.getDescriptor(TABLE_NAME).getColumnFamily(FAMILY).getBlocksize(); + Table table = admin.getConnection().getTable(TABLE_NAME); + + doPuts(10_000, FAMILY, QUALIFIER, table); + TEST_UTIL.flush(TABLE_NAME); + + // Add ~10 block/min limit + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.READ_SIZE, + Math.round(10.1 * blockSize), TimeUnit.MINUTES)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + + // should execute at max 10 requests + testTraffic(() -> doGets(20, FAMILY, QUALIFIER, table), 10, 1); + + // wait a minute and you should get another 10 requests executed + waitMinuteQuota(); + testTraffic(() -> doGets(20, FAMILY, QUALIFIER, table), 10, 1); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doGets(100, FAMILY, QUALIFIER, table), 100, 0); + testTraffic(() -> doGets(100, FAMILY, QUALIFIER, table), 100, 0); + } + + @Test + public void testBBSScan() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final String userName = User.getCurrent().getShortName(); + int blockSize = admin.getDescriptor(TABLE_NAME).getColumnFamily(FAMILY).getBlocksize(); + Table table = admin.getConnection().getTable(TABLE_NAME); + + doPuts(10_000, FAMILY, QUALIFIER, table); + TEST_UTIL.flush(TABLE_NAME); + + // Add 1 block/min limit. + // This should only allow 1 scan per minute, because we estimate 1 block per scan + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, blockSize, + TimeUnit.MINUTES)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // should execute 1 request + testTraffic(() -> doScans(5, table), 1, 0); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doScans(100, table), 100, 0); + testTraffic(() -> doScans(100, table), 100, 0); + + // Add ~3 block/min limit. 
This should support >1 scans + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, + Math.round(3.1 * blockSize), TimeUnit.MINUTES)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + + // should execute some requests, but not all + testTraffic(() -> doScans(100, table), 100, 90); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doScans(100, table), 100, 0); + testTraffic(() -> doScans(100, table), 100, 0); + } + + @Test + public void testBBSMultiGet() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final String userName = User.getCurrent().getShortName(); + int blockSize = admin.getDescriptor(TABLE_NAME).getColumnFamily(FAMILY).getBlocksize(); + Table table = admin.getConnection().getTable(TABLE_NAME); + int rowCount = 10_000; + + doPuts(rowCount, FAMILY, QUALIFIER, table); + TEST_UTIL.flush(TABLE_NAME); + + // Add 1 block/min limit. + // This should only allow 1 multiget per minute, because we estimate 1 block per multiget + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, blockSize, + TimeUnit.MINUTES)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // should execute 1 request + testTraffic(() -> doMultiGets(10, 10, rowCount, FAMILY, QUALIFIER, table), 1, 1); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); + testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); + + // Add ~100 block/min limit + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, + Math.round(100.1 * blockSize), TimeUnit.MINUTES)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + + // should execute approximately 10 batches of 10 requests + testTraffic(() -> doMultiGets(20, 10, rowCount, FAMILY, QUALIFIER, table), 10, 1); + + // wait a minute and you should get another ~10 batches of 10 requests + waitMinuteQuota(); + testTraffic(() -> doMultiGets(20, 10, rowCount, FAMILY, QUALIFIER, table), 10, 1); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); + testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); + } + + private void testTraffic(Callable trafficCallable, long expectedSuccess, long marginOfError) + throws Exception { + TEST_UTIL.waitFor(90_000, () -> { + long actualSuccess; + try { + actualSuccess = trafficCallable.call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + LOG.info("Traffic test yielded {} successful requests. 
Expected {} +/- {}", actualSuccess, + expectedSuccess, marginOfError); + boolean success = (actualSuccess >= expectedSuccess - marginOfError) + && (actualSuccess <= expectedSuccess + marginOfError); + if (!success) { + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + waitMinuteQuota(); + Thread.sleep(15_000L); + } + return success; + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java index de6f5653ad2c..bc2d0ae0713e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java @@ -18,12 +18,17 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; +import java.util.Random; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; @@ -105,6 +110,64 @@ static long doGets(int maxOps, final Table... tables) { return count; } + static long doGets(int maxOps, byte[] family, byte[] qualifier, final Table... tables) { + int count = 0; + try { + while (count < maxOps) { + Get get = new Get(Bytes.toBytes("row-" + count)); + get.addColumn(family, qualifier); + for (final Table table : tables) { + table.get(get); + } + count += tables.length; + } + } catch (IOException e) { + LOG.error("get failed after nRetries=" + count, e); + } + return count; + } + + static long doMultiGets(int maxOps, int batchSize, int rowCount, byte[] family, byte[] qualifier, + final Table... tables) { + int opCount = 0; + Random random = new Random(); + try { + while (opCount < maxOps) { + List gets = new ArrayList<>(batchSize); + while (gets.size() < batchSize) { + Get get = new Get(Bytes.toBytes("row-" + random.nextInt(rowCount))); + get.addColumn(family, qualifier); + gets.add(get); + } + for (final Table table : tables) { + table.get(gets); + } + opCount += tables.length; + } + } catch (IOException e) { + LOG.error("multiget failed after nRetries=" + opCount, e); + } + return opCount; + } + + static long doScans(int maxOps, Table table) { + int count = 0; + int caching = 100; + try { + Scan scan = new Scan(); + scan.setCaching(caching); + scan.setCacheBlocks(false); + ResultScanner scanner = table.getScanner(scan); + while (count < (maxOps * caching)) { + scanner.next(); + count += 1; + } + } catch (IOException e) { + LOG.error("scan failed after nRetries=" + count, e); + } + return count / caching; + } + static void triggerUserCacheRefresh(HBaseTestingUtil testUtil, boolean bypass, TableName... 
tables) throws Exception { triggerCacheRefresh(testUtil, bypass, true, false, false, false, false, tables); From 36562937468bf4a3b77724ab6c2fdd1e34de16a3 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 7 Feb 2024 13:06:11 +0100 Subject: [PATCH 242/514] HBASE-28345 Close HBase connection on exit from HBase Shell (#5665) Signed-off-by: Wellington Chevreuil Signed-off-by: Balazs Meszaros --- hbase-shell/src/main/ruby/shell.rb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 414ab9d2bd51..39fbd2ccba16 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -108,6 +108,11 @@ class Shell # exit the interactive shell and save that this # happend via a call to exit def exit(ret = 0) + # Non-deamon Netty threadpool in ZK ClientCnxnSocketNetty cannot be shut down otherwise + begin + hbase.shutdown + rescue Exception + end @exit_code = ret IRB.irb_exit(IRB.CurrentContext.irb, ret) end From 275d928a7d48aebc2172d32294bac40cc9460d11 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 7 Feb 2024 09:23:17 -0500 Subject: [PATCH 243/514] HBASE-27800: Add support for default user quotas (#5666) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/quotas/QuotaCache.java | 3 +- .../apache/hadoop/hbase/quotas/QuotaUtil.java | 61 +++++++- .../hadoop/hbase/quotas/TestDefaultQuota.java | 138 ++++++++++++++++++ 3 files changed, 200 insertions(+), 2 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultQuota.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 0a57b9fd8f8f..67b2aecc5448 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -137,7 +137,8 @@ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableNa * @return the quota info associated to specified user */ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { - return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), UserQuotaState::new, + return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), + () -> QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration()), this::triggerCacheRefresh); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index f9fa1a95c0cc..2e51a8f75610 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -49,7 +50,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TimeUnit; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.QuotaScope; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; @@ -73,6 +76,26 @@ public class QuotaUtil extends QuotaTableUtil { // the default one write capacity unit is 1024 bytes (1KB) public static final long DEFAULT_WRITE_CAPACITY_UNIT = 1024; + /* + * The below defaults, if configured, will be applied to otherwise unthrottled users. For example, + * set `hbase.quota.default.user.machine.read.size` to `1048576` in your hbase-site.xml to ensure + * that any given user may not query more than 1mb per second from any given machine, unless + * explicitly permitted by a persisted quota. All of these defaults use TimeUnit.SECONDS and + * QuotaScope.MACHINE. + */ + public static final String QUOTA_DEFAULT_USER_MACHINE_READ_NUM = + "hbase.quota.default.user.machine.read.num"; + public static final String QUOTA_DEFAULT_USER_MACHINE_READ_SIZE = + "hbase.quota.default.user.machine.read.size"; + public static final String QUOTA_DEFAULT_USER_MACHINE_REQUEST_NUM = + "hbase.quota.default.user.machine.request.num"; + public static final String QUOTA_DEFAULT_USER_MACHINE_REQUEST_SIZE = + "hbase.quota.default.user.machine.request.size"; + public static final String QUOTA_DEFAULT_USER_MACHINE_WRITE_NUM = + "hbase.quota.default.user.machine.write.num"; + public static final String QUOTA_DEFAULT_USER_MACHINE_WRITE_SIZE = + "hbase.quota.default.user.machine.write.size"; + /** Table descriptor for Quota internal table */ public static final TableDescriptor QUOTA_TABLE_DESC = TableDescriptorBuilder.newBuilder(QUOTA_TABLE_NAME) @@ -284,10 +307,14 @@ public static Map fetchUserQuotas(final Connection conne assert isUserRowKey(key); String user = getUserFromRowKey(key); + if (results[i].isEmpty()) { + userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration())); + continue; + } + final UserQuotaState quotaInfo = new UserQuotaState(nowTs); userQuotas.put(user, quotaInfo); - if (results[i].isEmpty()) continue; assert Bytes.equals(key, results[i].getRow()); try { @@ -321,6 +348,38 @@ public void visitUserQuotas(String userName, Quotas quotas) { return userQuotas; } + protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) { + QuotaProtos.Throttle.Builder throttleBuilder = QuotaProtos.Throttle.newBuilder(); + + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_NUM) + .ifPresent(throttleBuilder::setReadNum); + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_SIZE) + .ifPresent(throttleBuilder::setReadSize); + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_NUM) + .ifPresent(throttleBuilder::setReqNum); + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_SIZE) + .ifPresent(throttleBuilder::setReqSize); + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_WRITE_NUM) + .ifPresent(throttleBuilder::setWriteNum); + buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_WRITE_SIZE) + .ifPresent(throttleBuilder::setWriteSize); + + UserQuotaState state = new UserQuotaState(); + QuotaProtos.Quotas defaultQuotas = + QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build(); + state.setQuotas(defaultQuotas); + return state; + } + + private static Optional buildDefaultTimedQuota(Configuration conf, String key) { + int defaultSoftLimit = conf.getInt(key, -1); + if (defaultSoftLimit == -1) { + return Optional.empty(); + } + return Optional.of(ProtobufUtil.toTimedQuota(defaultSoftLimit, + java.util.concurrent.TimeUnit.SECONDS, org.apache.hadoop.hbase.quotas.QuotaScope.MACHINE)); + } + public static Map 
fetchTableQuotas(final Connection connection, final List gets, Map tableMachineFactors) throws IOException { return fetchGlobalQuotas("table", connection, gets, new KeyFromRow() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultQuota.java new file mode 100644 index 000000000000..9a2200731f60 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultQuota.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.triggerUserCacheRefresh; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.waitMinuteQuota; + +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestDefaultQuota { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDefaultQuota.class); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final TableName TABLE_NAME = TableName.valueOf(UUID.randomUUID().toString()); + private static final int REFRESH_TIME = 5000; + private static final byte[] FAMILY = Bytes.toBytes("cf"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); + + @After + public void tearDown() throws Exception { + ThrottleQuotaTestUtil.clearQuotaCache(TEST_UTIL); + EnvironmentEdgeManager.reset(); + TEST_UTIL.deleteTable(TABLE_NAME); + TEST_UTIL.shutdownMiniCluster(); + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // quotas enabled, using block bytes scanned + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME); + TEST_UTIL.getConfiguration().setInt(QuotaUtil.QUOTA_DEFAULT_USER_MACHINE_READ_NUM, 1); + + // don't cache blocks 
to make IO predictable + TEST_UTIL.getConfiguration().setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); + + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + TEST_UTIL.createTable(TABLE_NAME, FAMILY); + TEST_UTIL.waitTableAvailable(TABLE_NAME); + QuotaCache.TEST_FORCE_REFRESH = true; + + try (Admin admin = TEST_UTIL.getAdmin()) { + ThrottleQuotaTestUtil.doPuts(1_000, FAMILY, QUALIFIER, + admin.getConnection().getTable(TABLE_NAME)); + } + TEST_UTIL.flush(TABLE_NAME); + } + + @Test + public void testDefaultUserReadNum() throws Exception { + // Should have a strict throttle by default + TEST_UTIL.waitFor(60_000, () -> runGetsTest(100) < 100); + + // Add big quota and should be effectively unlimited + configureLenientThrottle(); + refreshQuotas(); + // Should run without error + TEST_UTIL.waitFor(60_000, () -> runGetsTest(100) == 100); + + // Remove all the limits, and should revert to strict default + unsetQuota(); + TEST_UTIL.waitFor(60_000, () -> runGetsTest(100) < 100); + } + + private void configureLenientThrottle() throws IOException { + try (Admin admin = TEST_UTIL.getAdmin()) { + admin.setQuota(QuotaSettingsFactory.throttleUser(getUserName(), ThrottleType.READ_NUMBER, + 100_000, TimeUnit.SECONDS)); + } + } + + private static String getUserName() throws IOException { + return User.getCurrent().getShortName(); + } + + private void refreshQuotas() throws Exception { + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + } + + private void unsetQuota() throws Exception { + try (Admin admin = TEST_UTIL.getAdmin()) { + admin.setQuota(QuotaSettingsFactory.unthrottleUser(getUserName())); + } + refreshQuotas(); + } + + private long runGetsTest(int attempts) throws Exception { + refreshQuotas(); + try (Table table = getTable()) { + return ThrottleQuotaTestUtil.doGets(attempts, FAMILY, QUALIFIER, table); + } + } + + private Table getTable() throws IOException { + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 100); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + return TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(250) + .build(); + } + +} From e85557a34d763c37baa36e94800689947e69b930 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: Thu, 8 Feb 2024 15:50:52 +0100 Subject: [PATCH 244/514] HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659) Signed-off-by: Wellington Chevreuil --- .../NettyHBaseSaslRpcClientHandler.java | 6 +++ .../TestShadeSaslAuthenticationProvider.java | 43 +++++-------------- 2 files changed, 17 insertions(+), 32 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index 48e631c76299..cc71355d4297 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -145,6 +145,12 @@ public byte[] run() throws Exception { // Mechanisms which have multiple steps will not return true on `SaslClient#isComplete()` // until the handshake has fully completed. Mechanisms which only send a single buffer may // return true on `isComplete()` after that initial response is calculated. 
+ + // HBASE-28337 We still want to check if the SaslClient completed the handshake, because + // there are certain mechs like PLAIN which doesn't have a server response after the + // initial authentication request. We cannot remove this tryComplete(), otherwise mechs + // like PLAIN will fail with call timeout. + tryComplete(ctx); } catch (Exception e) { // the exception thrown by handlerAdded will not be passed to the exceptionCaught below // because netty will remove a handler if handlerAdded throws an exception. diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java index a479310691b1..26a8943096a9 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java @@ -27,8 +27,6 @@ import java.io.File; import java.io.IOException; import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.io.StringWriter; import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -69,10 +67,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -84,8 +80,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; - @Category({ MediumTests.class, SecurityTests.class }) public class TestShadeSaslAuthenticationProvider { private static final Logger LOG = @@ -212,21 +206,23 @@ public String run() throws Exception { @Test public void testPositiveAuthentication() throws Exception { final Configuration clientConf = new Configuration(CONF); - try (Connection conn = ConnectionFactory.createConnection(clientConf)) { + try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) { UserGroupInformation user1 = UserGroupInformation.createUserForTesting("user1", new String[0]); - user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", USER1_PASSWORD)); + user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", USER1_PASSWORD)); user1.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table t = conn.getTable(tableName)) { - Result r = t.get(new Get(Bytes.toBytes("r1"))); - assertNotNull(r); - assertFalse("Should have read a non-empty Result", r.isEmpty()); - final Cell cell = r.getColumnLatestCell(Bytes.toBytes("f1"), Bytes.toBytes("q1")); - assertTrue("Unexpected value", CellUtil.matchingValue(cell, Bytes.toBytes("1"))); + try (Connection conn = ConnectionFactory.createConnection(clientConf)) { + try (Table t = conn.getTable(tableName)) { + Result r = t.get(new Get(Bytes.toBytes("r1"))); + assertNotNull(r); + assertFalse("Should have read a non-empty Result", r.isEmpty()); + final Cell cell = r.getColumnLatestCell(Bytes.toBytes("f1"), Bytes.toBytes("q1")); + assertTrue("Unexpected value", CellUtil.matchingValue(cell, 
Bytes.toBytes("1"))); - return null; + return null; + } } } }); @@ -268,7 +264,6 @@ public Void run() throws Exception { } catch (Exception e) { LOG.info("Caught exception in negative Master connectivity test", e); assertEquals("Found unexpected exception", pair.getSecond(), e.getClass()); - validateRootCause(Throwables.getRootCause(e)); } return null; } @@ -287,7 +282,6 @@ public Void run() throws Exception { } catch (Exception e) { LOG.info("Caught exception in negative RegionServer connectivity test", e); assertEquals("Found unexpected exception", pair.getSecond(), e.getClass()); - validateRootCause(Throwables.getRootCause(e)); } return null; } @@ -301,19 +295,4 @@ public Void run() throws Exception { } }); } - - void validateRootCause(Throwable rootCause) { - LOG.info("Root cause was", rootCause); - if (rootCause instanceof RemoteException) { - RemoteException re = (RemoteException) rootCause; - IOException actualException = re.unwrapRemoteException(); - assertEquals(InvalidToken.class, actualException.getClass()); - } else { - StringWriter writer = new StringWriter(); - rootCause.printStackTrace(new PrintWriter(writer)); - String text = writer.toString(); - assertTrue("Message did not contain expected text", - text.contains(InvalidToken.class.getName())); - } - } } From 98eb3e01b352684de3c647a6fda6208a657c4607 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Thu, 8 Feb 2024 16:16:47 -0500 Subject: [PATCH 245/514] HBASE-28349 Count atomic operations against read quotas (#5668) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/quotas/OperationQuota.java | 3 +- .../apache/hadoop/hbase/quotas/QuotaUtil.java | 26 ++ .../quotas/RegionServerRpcQuotaManager.java | 16 +- .../hbase/regionserver/RSRpcServices.java | 9 +- .../hbase/quotas/TestAtomicReadQuota.java | 237 ++++++++++++++++++ 5 files changed, 283 insertions(+), 8 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestAtomicReadQuota.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java index e18d3eb34953..ffc3cd50825c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java @@ -32,7 +32,8 @@ public interface OperationQuota { public enum OperationType { MUTATE, GET, - SCAN + SCAN, + CHECK_AND_MUTATE } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 2e51a8f75610..44357c88d2dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -51,6 +51,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TimeUnit; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.QuotaScope; @@ -177,6 +178,31 @@ public static void deleteRegionServerQuota(final Connection connection, final St deleteQuotas(connection, getRegionServerRowKey(regionServer)); } + public static OperationQuota.OperationType getQuotaOperationType(ClientProtos.Action action, + boolean hasCondition) { + if 
(action.hasMutation()) { + return getQuotaOperationType(action.getMutation(), hasCondition); + } + return OperationQuota.OperationType.GET; + } + + public static OperationQuota.OperationType + getQuotaOperationType(ClientProtos.MutateRequest mutateRequest) { + return getQuotaOperationType(mutateRequest.getMutation(), mutateRequest.hasCondition()); + } + + private static OperationQuota.OperationType + getQuotaOperationType(ClientProtos.MutationProto mutationProto, boolean hasCondition) { + ClientProtos.MutationProto.MutationType mutationType = mutationProto.getMutateType(); + if ( + hasCondition || mutationType == ClientProtos.MutationProto.MutationType.APPEND + || mutationType == ClientProtos.MutationProto.MutationType.INCREMENT + ) { + return OperationQuota.OperationType.CHECK_AND_MUTATE; + } + return OperationQuota.OperationType.MUTATE; + } + protected static void switchExceedThrottleQuota(final Connection connection, boolean exceedThrottleQuotaEnabled) throws IOException { if (exceedThrottleQuotaEnabled) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index de76303e27ac..3c72c662887b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -171,6 +171,8 @@ public OperationQuota checkQuota(final Region region, final OperationQuota.Opera return checkQuota(region, 0, 1, 0); case MUTATE: return checkQuota(region, 1, 0, 0); + case CHECK_AND_MUTATE: + return checkQuota(region, 1, 1, 0); } throw new RuntimeException("Invalid operation type: " + type); } @@ -178,18 +180,24 @@ public OperationQuota checkQuota(final Region region, final OperationQuota.Opera /** * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the * available quota and to report the data/usage of the operation. - * @param region the region where the operation will be performed - * @param actions the "multi" actions to perform + * @param region the region where the operation will be performed + * @param actions the "multi" actions to perform + * @param hasCondition whether the RegionAction has a condition * @return the OperationQuota * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. 
*/ - public OperationQuota checkQuota(final Region region, final List actions) - throws IOException, RpcThrottlingException { + public OperationQuota checkQuota(final Region region, final List actions, + boolean hasCondition) throws IOException, RpcThrottlingException { int numWrites = 0; int numReads = 0; for (final ClientProtos.Action action : actions) { if (action.hasMutation()) { numWrites++; + OperationQuota.OperationType operationType = + QuotaUtil.getQuotaOperationType(action, hasCondition); + if (operationType == OperationQuota.OperationType.CHECK_AND_MUTATE) { + numReads++; + } } else if (action.hasGet()) { numReads++; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 05d7c2e56055..0538b9706e89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2679,7 +2679,8 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) try { region = getRegion(regionSpecifier); - quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList()); + quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList(), + regionAction.hasCondition()); } catch (IOException e) { failRegionAction(responseBuilder, regionActionResultBuilder, regionAction, cellScanner, e); return responseBuilder.build(); @@ -2741,7 +2742,8 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) try { region = getRegion(regionSpecifier); - quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList()); + quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList(), + regionAction.hasCondition()); } catch (IOException e) { failRegionAction(responseBuilder, regionActionResultBuilder, regionAction, cellScanner, e); continue; // For this region it's a failure. @@ -2924,7 +2926,8 @@ public MutateResponse mutate(final RpcController rpcc, final MutateRequest reque server.getMemStoreFlusher().reclaimMemStoreMemory(); } long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; - quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE); + OperationQuota.OperationType operationType = QuotaUtil.getQuotaOperationType(request); + quota = getRpcQuotaManager().checkQuota(region, operationType); ActivePolicyEnforcement spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestAtomicReadQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestAtomicReadQuota.java new file mode 100644 index 000000000000..9b654ac8e6d0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestAtomicReadQuota.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.CheckAndMutate; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestAtomicReadQuota { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAtomicReadQuota.class); + private static final Logger LOG = LoggerFactory.getLogger(TestAtomicReadQuota.class); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final TableName TABLE_NAME = TableName.valueOf(UUID.randomUUID().toString()); + private static final byte[] FAMILY = Bytes.toBytes("cf"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); + + @AfterClass + public static void tearDown() throws Exception { + ThrottleQuotaTestUtil.clearQuotaCache(TEST_UTIL); + EnvironmentEdgeManager.reset(); + TEST_UTIL.deleteTable(TABLE_NAME); + TEST_UTIL.shutdownMiniCluster(); + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, 1000); + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + TEST_UTIL.createTable(TABLE_NAME, FAMILY); + TEST_UTIL.waitTableAvailable(TABLE_NAME); + QuotaCache.TEST_FORCE_REFRESH = true; + } + + @Test + public void testIncrementCountedAgainstReadCapacity() throws Exception { + setupQuota(); + + Increment inc = new Increment(Bytes.toBytes(UUID.randomUUID().toString())); + inc.addColumn(FAMILY, QUALIFIER, 1); + testThrottle(table -> table.increment(inc)); + } + + @Test + public void testConditionalRowMutationsCountedAgainstReadCapacity() throws Exception { + setupQuota(); + + byte[] row = Bytes.toBytes(UUID.randomUUID().toString()); + Increment inc = new Increment(row); + inc.addColumn(FAMILY, Bytes.toBytes("doot"), 1); + Put put = new Put(row); + put.addColumn(FAMILY, Bytes.toBytes("doot"), Bytes.toBytes("v")); + + RowMutations 
rowMutations = new RowMutations(row); + rowMutations.add(inc); + rowMutations.add(put); + testThrottle(table -> table.mutateRow(rowMutations)); + } + + @Test + public void testNonConditionalRowMutationsOmittedFromReadCapacity() throws Exception { + setupQuota(); + + byte[] row = Bytes.toBytes(UUID.randomUUID().toString()); + Put put = new Put(row); + put.addColumn(FAMILY, Bytes.toBytes("doot"), Bytes.toBytes("v")); + + RowMutations rowMutations = new RowMutations(row); + rowMutations.add(put); + try (Table table = getTable()) { + for (int i = 0; i < 100; i++) { + table.mutateRow(rowMutations); + } + } + } + + @Test + public void testNonAtomicPutOmittedFromReadCapacity() throws Exception { + setupQuota(); + + byte[] row = Bytes.toBytes(UUID.randomUUID().toString()); + Put put = new Put(row); + put.addColumn(FAMILY, Bytes.toBytes("doot"), Bytes.toBytes("v")); + try (Table table = getTable()) { + for (int i = 0; i < 100; i++) { + table.put(put); + } + } + } + + @Test + public void testNonAtomicMultiPutOmittedFromReadCapacity() throws Exception { + setupQuota(); + + Put put1 = new Put(Bytes.toBytes(UUID.randomUUID().toString())); + put1.addColumn(FAMILY, Bytes.toBytes("doot"), Bytes.toBytes("v")); + Put put2 = new Put(Bytes.toBytes(UUID.randomUUID().toString())); + put2.addColumn(FAMILY, Bytes.toBytes("doot"), Bytes.toBytes("v")); + + Increment inc = new Increment(Bytes.toBytes(UUID.randomUUID().toString())); + inc.addColumn(FAMILY, Bytes.toBytes("doot"), 1); + + List puts = new ArrayList<>(2); + puts.add(put1); + puts.add(put2); + + try (Table table = getTable()) { + for (int i = 0; i < 100; i++) { + table.put(puts); + } + } + } + + @Test + public void testCheckAndMutateCountedAgainstReadCapacity() throws Exception { + setupQuota(); + + byte[] row = Bytes.toBytes(UUID.randomUUID().toString()); + byte[] value = Bytes.toBytes("v"); + Put put = new Put(row); + put.addColumn(FAMILY, Bytes.toBytes("doot"), value); + CheckAndMutate checkAndMutate = + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, value).build(put); + + testThrottle(table -> table.checkAndMutate(checkAndMutate)); + } + + @Test + public void testAtomicBatchCountedAgainstReadCapacity() throws Exception { + setupQuota(); + + byte[] row = Bytes.toBytes(UUID.randomUUID().toString()); + Increment inc = new Increment(row); + inc.addColumn(FAMILY, Bytes.toBytes("doot"), 1); + + List incs = new ArrayList<>(2); + incs.add(inc); + incs.add(inc); + + testThrottle(table -> { + Object[] results = new Object[] {}; + table.batch(incs, results); + return results; + }); + } + + private void setupQuota() throws Exception { + try (Admin admin = TEST_UTIL.getAdmin()) { + admin.setQuota(QuotaSettingsFactory.throttleUser(User.getCurrent().getShortName(), + ThrottleType.READ_NUMBER, 1, TimeUnit.MINUTES)); + } + ThrottleQuotaTestUtil.triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + } + + private void cleanupQuota() throws Exception { + try (Admin admin = TEST_UTIL.getAdmin()) { + admin.setQuota(QuotaSettingsFactory.unthrottleUser(User.getCurrent().getShortName())); + } + ThrottleQuotaTestUtil.triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + } + + private void testThrottle(ThrowingFunction request) throws Exception { + try (Table table = getTable()) { + // we have a read quota configured, so this should fail + TEST_UTIL.waitFor(60_000, () -> { + try { + request.run(table); + return false; + } catch (Exception e) { + boolean success = e.getCause() instanceof RpcThrottlingException; + if (!success) { + LOG.error("Unexpected 
exception", e); + } + return success; + } + }); + } finally { + cleanupQuota(); + } + } + + private Table getTable() throws IOException { + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 100); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + return TEST_UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(250) + .build(); + } + + @FunctionalInterface + private interface ThrowingFunction { + O run(I input) throws Exception; + } + +} From b3ffc4acb64815eb5dfbbb02889ef830e119e37a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Feb 2024 11:04:06 +0800 Subject: [PATCH 246/514] HBASE-28355 Bump cryptography in /dev-support/git-jira-release-audit (#5663) Bumps [cryptography](https://github.com/pyca/cryptography) from 41.0.6 to 42.0.0. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/41.0.6...42.0.0) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 99fb2e2d0de9..e9fc0361d8d8 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2023.7.22 cffi==1.13.2 chardet==3.0.4 -cryptography==41.0.6 +cryptography==42.0.0 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From 3ccf65579b682cd288eb37f8de634d9920392fdc Mon Sep 17 00:00:00 2001 From: Rajeshbabu Chintaguntla Date: Mon, 12 Feb 2024 09:19:24 +0530 Subject: [PATCH 247/514] HBASE-28341 [JDK17] Fix Failure TestLdapHttpServer (#5672) Co-authored-by: Rajeshbabu Chintaguntla --- pom.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index a32e73fb3377..b8ac1067cf4a 100644 --- a/pom.xml +++ b/pom.xml @@ -990,7 +990,9 @@ --add-exports java.base/sun.net.util=ALL-UNNAMED - --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED + --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED + --add-opens java.base/sun.security.x509=ALL-UNNAMED + --add-opens java.base/sun.security.util=ALL-UNNAMED ${hbase-surefire.argLine} @{jacocoArgLine} 1.5.1 From 3e281bd207213e6618d599a4a55db1b79dc36787 Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Mon, 12 Feb 2024 09:14:06 -0800 Subject: [PATCH 248/514] HBASE-28204 : Region Canary can take lot more time If any region (except the first region) starts with delete markers (#5675) Signed-off-by: David Manning Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/tool/CanaryTool.java | 37 +++++++++++++++---- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d5676263c820..a3caf1b24c74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -510,19 +510,44 @@ public Void call() { private Void readColumnFamily(Table table, ColumnFamilyDescriptor column) { byte[] startKey = null; - 
Get get = null; Scan scan = null; ResultScanner rs = null; StopWatch stopWatch = new StopWatch(); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. if (startKey.length > 0) { - get = new Get(startKey); + Get get = new Get(startKey); get.setCacheBlocks(false); get.setFilter(new FirstKeyOnlyFilter()); get.addFamily(column.getName()); + // Converting get object to scan to enable RAW SCAN. + // This will work for all the regions of the HBase tables except first region of the table. + scan = new Scan(get); + scan.setRaw(rawScanEnabled); } else { scan = new Scan(); + // In case of first region of the HBase Table, we do not have start-key for the region. + // For Region Canary, we only need to scan a single row/cell in the region to make sure that + // region is accessible. + // + // When HBase table has more than 1 empty regions at start of the row-key space, Canary will + // create multiple scan object to find first available row in the table by scanning all the + // regions in sequence until it can find first available row. + // + // This could result in multiple millions of scans based on the size of table and number of + // empty regions in sequence. In test environment, A table with no data and 1100 empty + // regions, Single canary run was creating close to half million to 1 million scans to + // successfully do canary run for the table. + // + // Since First region of the table doesn't have any start key, We should set End Key as + // stop row and set inclusive=false to limit scan to single region only. + // + // TODO : In future, we can streamline Canary behaviour for all the regions by doing scan + // with startRow inclusive and stopRow exclusive instead of different behaviour for First + // Region of the table and rest of the region of the table. This way implementation is + // simplified. As of now this change has been kept minimal to avoid any unnecessary + // perf impact. + scan.withStopRow(region.getEndKey(), false); LOG.debug("rawScan {} for {}", rawScanEnabled, region.getTable()); scan.setRaw(rawScanEnabled); scan.setCaching(1); @@ -536,12 +561,8 @@ private Void readColumnFamily(Table table, ColumnFamilyDescriptor column) { column.getNameAsString(), Bytes.toStringBinary(startKey)); try { stopWatch.start(); - if (startKey.length > 0) { - table.get(get); - } else { - rs = table.getScanner(scan); - rs.next(); - } + rs = table.getScanner(scan); + rs.next(); stopWatch.stop(); this.readWriteLatency.add(stopWatch.getTime()); sink.publishReadTiming(serverName, region, column, stopWatch.getTime()); From e2ff15898410ee7e60e2dcd5eecedb5ed2fb525a Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Mon, 12 Feb 2024 17:16:32 -0800 Subject: [PATCH 249/514] HBASE-28357 MoveWithAck#isSuccessfulScan for Region movement should use Region End Key for limiting scan to one region only. 
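Both the canary change above and the MoveWithAck change that follows rely on the same pattern: a single-row probe scan that is bounded to its region by using the region end key as an exclusive stop row, so an empty region (including an empty first region) can no longer leak the probe into neighbouring regions. A minimal sketch of such a probe, closely mirroring the patched code; the helper class name is an illustrative assumption, and an open Connection and RegionInfo are assumed to be at hand.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

final class SingleRegionProbe {
  // Reads at most one cell of the given region and never crosses into the
  // next region, even when the region is empty or starts with delete markers.
  static void probe(Connection conn, RegionInfo region, boolean rawScan) throws IOException {
    Scan scan = new Scan().withStartRow(region.getStartKey())
      .withStopRow(region.getEndKey(), false) // exclusive stop row bounds the probe
      .setRaw(rawScan) // a raw scan also returns delete markers, so it still "finds" a row
      .setOneRowLimit().setMaxResultSize(1L).setCaching(1)
      .setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false);
    try (Table table = conn.getTable(region.getTable());
      ResultScanner scanner = table.getScanner(scan)) {
      scanner.next();
    }
  }
}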
(#5677) Signed-off-by: Viraj Jasani --- .../main/java/org/apache/hadoop/hbase/util/MoveWithAck.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java index ede1f8b71508..7143598679be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java @@ -110,8 +110,9 @@ private static String getTimeDiffInSec(long startTime) { * Tries to scan a row from passed region */ private void isSuccessfulScan(RegionInfo region) throws IOException { - Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit() - .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false); + Scan scan = new Scan().withStartRow(region.getStartKey()).withStopRow(region.getEndKey(), false) + .setRaw(true).setOneRowLimit().setMaxResultSize(1L).setCaching(1) + .setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false); try (Table table = conn.getTable(region.getTable()); ResultScanner scanner = table.getScanner(scan)) { scanner.next(); From a4002d69a118296d5ac19adf46b1301be6ab6cfe Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Mon, 12 Feb 2024 17:23:42 -0800 Subject: [PATCH 250/514] HBASE-28356 RegionServer Canary should use Scan just like Region Canary with option to enable Raw Scan (#5676) Signed-off-by: David Manning Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/tool/CanaryTool.java | 42 +++++++++++++------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index a3caf1b24c74..92dca7c24c92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -670,14 +670,16 @@ static class RegionServerTask implements Callable { private String serverName; private RegionInfo region; private RegionServerStdOutSink sink; + private Boolean rawScanEnabled; private AtomicLong successes; RegionServerTask(Connection connection, String serverName, RegionInfo region, - RegionServerStdOutSink sink, AtomicLong successes) { + RegionServerStdOutSink sink, Boolean rawScanEnabled, AtomicLong successes) { this.connection = connection; this.serverName = serverName; this.region = region; this.sink = sink; + this.rawScanEnabled = rawScanEnabled; this.successes = successes; } @@ -702,22 +704,35 @@ public Void call() { get = new Get(startKey); get.setCacheBlocks(false); get.setFilter(new FirstKeyOnlyFilter()); - stopWatch.start(); - table.get(get); - stopWatch.stop(); + // Converting get object to scan to enable RAW SCAN. + // This will work for all the regions of the HBase tables except first region. + scan = new Scan(get); + } else { scan = new Scan(); + // In case of first region of the HBase Table, we do not have start-key for the region. + // For Region Canary, we only need scan a single row/cell in the region to make sure that + // region is accessible. + // + // When HBase table has more than 1 empty regions at start of the row-key space, Canary + // will create multiple scan object to find first available row in the table by scanning + // all the regions in sequence until it can find first available row. 
+ // + // Since First region of the table doesn't have any start key, We should set End Key as + // stop row and set inclusive=false to limit scan to first region only. + scan.withStopRow(region.getEndKey(), false); scan.setCacheBlocks(false); scan.setFilter(new FirstKeyOnlyFilter()); scan.setCaching(1); scan.setMaxResultSize(1L); scan.setOneRowLimit(); - stopWatch.start(); - ResultScanner s = table.getScanner(scan); - s.next(); - s.close(); - stopWatch.stop(); } + scan.setRaw(rawScanEnabled); + stopWatch.start(); + ResultScanner s = table.getScanner(scan); + s.next(); + s.close(); + stopWatch.stop(); successes.incrementAndGet(); sink.publishReadTiming(tableName.getNameAsString(), serverName, stopWatch.getTime()); } catch (TableNotFoundException tnfe) { @@ -1778,6 +1793,7 @@ private ZookeeperStdOutSink getSink() { * A monitor for regionserver mode */ private static class RegionServerMonitor extends Monitor { + private boolean rawScanEnabled; private boolean allRegions; public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, @@ -1785,6 +1801,8 @@ public RegionServerMonitor(Connection connection, String[] monitorTargets, boole long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, allowedFailures); + Configuration conf = connection.getConfiguration(); + this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false); this.allRegions = allRegions; } @@ -1857,14 +1875,14 @@ private void monitorRegionServers(Map> rsAndRMap, } else if (this.allRegions) { for (RegionInfo region : entry.getValue()) { tasks.add(new RegionServerTask(this.connection, serverName, region, regionServerSink, - successes)); + this.rawScanEnabled, successes)); } } else { // random select a region if flag not set RegionInfo region = entry.getValue().get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); - tasks.add( - new RegionServerTask(this.connection, serverName, region, regionServerSink, successes)); + tasks.add(new RegionServerTask(this.connection, serverName, region, regionServerSink, + this.rawScanEnabled, successes)); } } try { From 9656006778789850892403107be0a85074959765 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 13 Feb 2024 11:21:01 +0100 Subject: [PATCH 251/514] HBASE-28353 Close HBase connection on implicit exit from HBase shell (#5673) Signed-off-by: Bryan Beaudreault Signed-off-by: Balazs Meszaros --- hbase-shell/src/main/ruby/shell.rb | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 39fbd2ccba16..46b38dd96b89 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -108,11 +108,6 @@ class Shell # exit the interactive shell and save that this # happend via a call to exit def exit(ret = 0) - # Non-deamon Netty threadpool in ZK ClientCnxnSocketNetty cannot be shut down otherwise - begin - hbase.shutdown - rescue Exception - end @exit_code = ret IRB.irb_exit(IRB.CurrentContext.irb, ret) end @@ -323,6 +318,13 @@ def get_workspace hbase_receiver.send :define_singleton_method, :exit, lambda { |rc = 0| @shell.exit(rc) } + at_exit do + # Non-deamon Netty threadpool in ZK ClientCnxnSocketNetty cannot be shut down otherwise + begin + hbase.shutdown + rescue Exception + end + end ::IRB::WorkSpace.new(hbase_receiver.get_binding) end From d9257543a5eb3e93de225fb68c5a13afa78b6a76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: 
Thu, 15 Feb 2024 14:25:22 +0100 Subject: [PATCH 252/514] HBASE-28340. Use all Zk client properties that is found in HBase conf (#5669) Signed-off-by: Balazs Meszaros --- .../hadoop/hbase/zookeeper/ZKConfig.java | 54 ++++++++----------- .../hadoop/hbase/zookeeper/TestZKConfig.java | 25 ++++++++- 2 files changed, 45 insertions(+), 34 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 12d81fee6586..87885e2b9fd5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map.Entry; import java.util.Properties; -import java.util.Set; import org.apache.commons.validator.routines.InetAddressValidator; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -29,7 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; /** * Utility methods for reading, and building the ZooKeeper configuration. The order and priority for @@ -42,12 +40,6 @@ public final class ZKConfig { private static final String VARIABLE_START = "${"; private static final String ZOOKEEPER_JAVA_PROPERTY_PREFIX = "zookeeper."; - /** Supported ZooKeeper client TLS properties */ - static final Set ZOOKEEPER_CLIENT_TLS_PROPERTIES = - ImmutableSet.of("client.secure", "clientCnxnSocket", "ssl.keyStore.location", - "ssl.keyStore.password", "ssl.keyStore.passwordPath", "ssl.trustStore.location", - "ssl.trustStore.password", "ssl.trustStore.passwordPath"); - private ZKConfig() { } @@ -62,16 +54,12 @@ public static Properties makeZKProps(Configuration conf) { } /** - * Make a Properties object holding ZooKeeper config. Parses the corresponding config options from - * the HBase XML configs and generates the appropriate ZooKeeper properties. - * @param conf Configuration to read from. - * @return Properties holding mappings representing ZooKeeper config file. + * Directly map all the hbase.zookeeper.property.KEY properties. Synchronize on conf so no loading + * of configs while we iterate */ - private static Properties makeZKPropsFromHbaseConfig(Configuration conf) { + private static Properties extractZKPropsFromHBaseConfig(final Configuration conf) { Properties zkProperties = new Properties(); - // Directly map all of the hbase.zookeeper.property.KEY properties. - // Synchronize on conf so no loading of configs while we iterate synchronized (conf) { for (Entry entry : conf) { String key = entry.getKey(); @@ -87,6 +75,18 @@ private static Properties makeZKPropsFromHbaseConfig(Configuration conf) { } } + return zkProperties; + } + + /** + * Make a Properties object holding ZooKeeper config. Parses the corresponding config options from + * the HBase XML configs and generates the appropriate ZooKeeper properties. + * @param conf Configuration to read from. + * @return Properties holding mappings representing ZooKeeper config file. + */ + private static Properties makeZKPropsFromHbaseConfig(Configuration conf) { + Properties zkProperties = extractZKPropsFromHBaseConfig(conf); + // If clientPort is not set, assign the default. 
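The extractZKPropsFromHBaseConfig refactor above forwards every hbase.zookeeper.property.* entry to the ZooKeeper client instead of only an allow-listed TLS subset, and on the client path each key is exported as a zookeeper.* system property only when no such property is already set. A minimal sketch of the operator-facing side follows; the two property names appear in the accompanying test, while the PKCS12 value is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

final class ZkClientPropertyExample {
  static void forwardTlsSettings() {
    Configuration conf = HBaseConfiguration.create();
    // The "hbase.zookeeper.property." prefix is stripped and the remainder is
    // handed to the ZooKeeper client, e.g. client.secure and ssl.keyStore.type.
    conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + "client.secure", "true");
    conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + "ssl.keyStore.type", "PKCS12");
    // Building the quorum string triggers the system-property mapping; an
    // existing zookeeper.* system property is never overwritten.
    ZKConfig.getZKQuorumServersString(conf);
  }
}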
if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) { zkProperties.put(HConstants.CLIENT_PORT_STR, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); @@ -343,24 +343,12 @@ public static String getClientZKQuorumServersString(Configuration conf) { } private static void setZooKeeperClientSystemProperties(String prefix, Configuration conf) { - synchronized (conf) { - for (Entry entry : conf) { - String key = entry.getKey(); - if (!key.startsWith(prefix)) { - continue; - } - String zkKey = key.substring(prefix.length()); - if (!ZOOKEEPER_CLIENT_TLS_PROPERTIES.contains(zkKey)) { - continue; - } - String value = entry.getValue(); - // If the value has variables substitutions, need to do a get. - if (value.contains(VARIABLE_START)) { - value = conf.get(key); - } - if (System.getProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + zkKey) == null) { - System.setProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + zkKey, value); - } + Properties zkProperties = extractZKPropsFromHBaseConfig(conf); + for (Entry entry : zkProperties.entrySet()) { + String key = entry.getKey().toString().trim(); + String value = entry.getValue().toString().trim(); + if (System.getProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + key) == null) { + System.setProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + key, value); } } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java index 7418afe5d222..63df9043bae3 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hbase.zookeeper; -import static org.apache.hadoop.hbase.zookeeper.ZKConfig.ZOOKEEPER_CLIENT_TLS_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Properties; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,6 +33,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; + @Category({ MiscTests.class, SmallTests.class }) public class TestZKConfig { @@ -40,6 +42,12 @@ public class TestZKConfig { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKConfig.class); + /** Supported ZooKeeper client TLS properties */ + private static final Set ZOOKEEPER_CLIENT_TLS_PROPERTIES = ImmutableSet.of( + "client.secure", "clientCnxnSocket", "ssl.keyStore.location", "ssl.keyStore.password", + "ssl.keyStore.passwordPath", "ssl.keyStore.type", "ssl.trustStore.location", + "ssl.trustStore.password", "ssl.trustStore.passwordPath", "ssl.trustStore.type"); + @Test public void testZKConfigLoading() throws Exception { Configuration conf = HBaseConfiguration.create(); @@ -133,6 +141,21 @@ public void testZooKeeperTlsPropertiesServer() { } } + @Test + public void testZooKeeperPropertiesDoesntOverwriteSystem() { + // Arrange + System.setProperty("zookeeper.a.b.c", "foo"); + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + "a.b.c", "bar"); + + // Act + ZKConfig.getZKQuorumServersString(conf); + + // Assert + assertEquals("foo", System.getProperty("zookeeper.a.b.c")); + System.clearProperty("zookeeper.a.b.c"); + } + private void testKey(String 
ensemble, int port, String znode) throws IOException { testKey(ensemble, port, znode, false); // not support multiple client ports } From 96a447f0e64ae45c33e1ed23e7ba39cd3b0796a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 18:31:04 +0800 Subject: [PATCH 253/514] HBASE-28378 Bump cryptography in /dev-support/git-jira-release-audit (#5687) Bumps [cryptography](https://github.com/pyca/cryptography) from 42.0.0 to 42.0.2. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/42.0.0...42.0.2) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index e9fc0361d8d8..5e402a1875f4 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2023.7.22 cffi==1.13.2 chardet==3.0.4 -cryptography==42.0.0 +cryptography==42.0.2 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From b5175bad9deaf9caff8686e8957f8dd475e61f58 Mon Sep 17 00:00:00 2001 From: Anchal Kejriwal <55595137+anchal246@users.noreply.github.com> Date: Mon, 19 Feb 2024 01:24:18 +0530 Subject: [PATCH 254/514] HBASE-28142 Region Server Logs getting spammed with warning when storefile has no reader Signed-off-by: Nihal Jain Signed-off-by: Rajeshbabu Chintaguntla --- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 5e2bf00f85be..dccfd0c0af7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1616,7 +1616,7 @@ protected void refreshStoreSizeAndTotalBytes() throws IOException { for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { StoreFileReader r = hsf.getReader(); if (r == null) { - LOG.warn("StoreFile {} has a null Reader", hsf); + LOG.debug("StoreFile {} has a null Reader", hsf); continue; } this.storeSize.addAndGet(r.length()); @@ -1785,7 +1785,7 @@ public int getCompactedFilesCount() { private LongStream getStoreFileAgeStream() { return this.storeEngine.getStoreFileManager().getStorefiles().stream().filter(sf -> { if (sf.getReader() == null) { - LOG.warn("StoreFile {} has a null Reader", sf); + LOG.debug("StoreFile {} has a null Reader", sf); return false; } else { return true; From 5398b13bab9769113a7b52038c31c7a6a3f77402 Mon Sep 17 00:00:00 2001 From: Kerasone <42959153+Kerasone@users.noreply.github.com> Date: Mon, 19 Feb 2024 12:21:07 +0800 Subject: [PATCH 255/514] HBASE-28238 rpcservice should perform some important admin operation to priority ADMIN_QOS (#5558) HBASE-28238 rpcservice should perform some important admin operation to priority ADMIN_QOS (#5558) Co-authored-by: selina.yan --- .../java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java | 1 + 
.../org/apache/hadoop/hbase/regionserver/RSRpcServices.java | 2 ++ .../apache/hadoop/hbase/regionserver/TestRSQosFunction.java | 3 +++ 3 files changed, 6 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java index 291b38acb322..b2a0e7803624 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java @@ -310,6 +310,7 @@ public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controlle } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public UpdateConfigurationResponse updateConfiguration(RpcController controller, UpdateConfigurationRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 0538b9706e89..4926aa30c8a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1554,6 +1554,7 @@ public CompactRegionResponse compactRegion(final RpcController controller, } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public CompactionSwitchResponse compactionSwitch(RpcController controller, CompactionSwitchRequest request) throws ServiceException { rpcPreCheck("compactionSwitch"); @@ -2232,6 +2233,7 @@ public ReplicateWALEntryResponse replicateWALEntry(final RpcController controlle * @param request the request */ @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public RollWALWriterResponse rollWALWriter(final RpcController controller, final RollWALWriterRequest request) throws ServiceException { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java index c6bd8967e124..15ee32397e9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java @@ -72,5 +72,8 @@ public void testAnnotations() { checkMethod(conf, "CloseRegion", HConstants.ADMIN_QOS, qosFunction); checkMethod(conf, "CompactRegion", HConstants.ADMIN_QOS, qosFunction); checkMethod(conf, "FlushRegion", HConstants.ADMIN_QOS, qosFunction); + checkMethod(conf, "UpdateConfiguration", HConstants.ADMIN_QOS, qosFunction); + checkMethod(conf, "CompactionSwitch", HConstants.ADMIN_QOS, qosFunction); + checkMethod(conf, "RollWALWriter", HConstants.ADMIN_QOS, qosFunction); } } From 7bc07a6563e631a1ae1ec464c619ca0e921d8945 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 19 Feb 2024 21:36:18 +0800 Subject: [PATCH 256/514] HBASE-28377 Fallback to simple is broken for blocking rpc client (#5690) Signed-off-by: Bryan Beaudreault --- .../hbase/ipc/BlockingRpcConnection.java | 1 + .../hbase/security/HBaseSaslRpcClient.java | 8 +- .../hbase/security/AbstractTestSecureIPC.java | 102 +++++++++++++++--- 3 files changed, 90 insertions(+), 21 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index 0478000a2375..3f1418aa9849 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -499,6 +499,7 @@ public Boolean run() throws IOException { // fall back to simple auth because server told us so. // do not change authMethod and useSasl here, we should start from secure when // reconnecting because regionserver may change its sasl config after restart. + saslRpcClient = null; } } createStreams(inStream, outStream); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 0394bb0f2a3b..ace1c38ab22a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -32,6 +32,7 @@ import javax.security.sasl.SaslException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; @@ -107,12 +108,9 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio int len = inStream.readInt(); if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) { if (!fallbackAllowed) { - throw new IOException("Server asks us to fall back to SIMPLE auth, " - + "but this client is configured to only allow secure connections."); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Server asks us to fall back to simple auth."); + throw new FallbackDisallowedException(); } + LOG.debug("Server asks us to fall back to simple auth."); dispose(); return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java index 26405f4446bb..998896c94685 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java @@ -24,17 +24,22 @@ import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.loginKerberosPrincipal; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.setSecuredConfiguration; import static org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProviders.SELECTOR_KEY; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThrows; import static org.junit.Assert.fail; +import java.io.EOFException; import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.Map; @@ -44,12 +49,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import 
org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServerFactory; -import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.security.provider.AuthenticationProviderSelector; import org.apache.hadoop.hbase.security.provider.BuiltInProviderSelector; import org.apache.hadoop.hbase.security.provider.SaslAuthMethod; @@ -95,6 +101,7 @@ protected static void initKDCAndConf() throws Exception { // set a smaller timeout and retry to speed up tests TEST_UTIL.getConfiguration().setInt(RpcClient.SOCKET_TIMEOUT_READ, 2000); TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxretries", 1); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxbackoff", 100); } protected static void stopKDC() throws InterruptedException { @@ -237,7 +244,7 @@ public String getTokenKind() { } @Test - public void testRpcFallbackToSimpleAuth() throws Exception { + public void testRpcServerFallbackToSimpleAuth() throws Exception { String clientUsername = "testuser"; UserGroupInformation clientUgi = UserGroupInformation.createUserForTesting(clientUsername, new String[] { clientUsername }); @@ -252,6 +259,59 @@ public void testRpcFallbackToSimpleAuth() throws Exception { callRpcService(User.create(clientUgi)); } + @Test + public void testRpcServerDisallowFallbackToSimpleAuth() throws Exception { + String clientUsername = "testuser"; + UserGroupInformation clientUgi = + UserGroupInformation.createUserForTesting(clientUsername, new String[] { clientUsername }); + + // check that the client user is insecure + assertNotSame(ugi, clientUgi); + assertEquals(AuthenticationMethod.SIMPLE, clientUgi.getAuthenticationMethod()); + assertEquals(clientUsername, clientUgi.getUserName()); + + clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); + serverConf.setBoolean(RpcServer.FALLBACK_TO_INSECURE_CLIENT_AUTH, false); + IOException error = + assertThrows(IOException.class, () -> callRpcService(User.create(clientUgi))); + // server just closes the connection, so we could get broken pipe, or EOF, or connection closed + if (error.getMessage() == null || !error.getMessage().contains("Broken pipe")) { + assertThat(error, + either(instanceOf(EOFException.class)).or(instanceOf(ConnectionClosedException.class))); + } + } + + @Test + public void testRpcClientFallbackToSimpleAuth() throws Exception { + String serverUsername = "testuser"; + UserGroupInformation serverUgi = + UserGroupInformation.createUserForTesting(serverUsername, new String[] { serverUsername }); + // check that the server user is insecure + assertNotSame(ugi, serverUgi); + assertEquals(AuthenticationMethod.SIMPLE, serverUgi.getAuthenticationMethod()); + assertEquals(serverUsername, serverUgi.getUserName()); + + serverConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); + clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true); + callRpcService(User.create(serverUgi), User.create(ugi)); + } + + @Test + public void testRpcClientDisallowFallbackToSimpleAuth() throws Exception { + String serverUsername = "testuser"; + UserGroupInformation serverUgi = + UserGroupInformation.createUserForTesting(serverUsername, new String[] { serverUsername }); + // check that the server user is insecure + assertNotSame(ugi, serverUgi); + assertEquals(AuthenticationMethod.SIMPLE, 
serverUgi.getAuthenticationMethod()); + assertEquals(serverUsername, serverUgi.getUserName()); + + serverConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); + clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, false); + assertThrows(FallbackDisallowedException.class, + () -> callRpcService(User.create(serverUgi), User.create(ugi))); + } + private void setRpcProtection(String clientProtection, String serverProtection) { clientConf.set("hbase.rpc.protection", clientProtection); serverConf.set("hbase.rpc.protection", serverProtection); @@ -263,25 +323,25 @@ private void setRpcProtection(String clientProtection, String serverProtection) @Test public void testSaslWithCommonQop() throws Exception { setRpcProtection("privacy,authentication", "authentication"); - callRpcService(User.create(ugi)); + callRpcService(); setRpcProtection("authentication", "privacy,authentication"); - callRpcService(User.create(ugi)); + callRpcService(); setRpcProtection("integrity,authentication", "privacy,authentication"); - callRpcService(User.create(ugi)); + callRpcService(); setRpcProtection("integrity,authentication", "integrity,authentication"); - callRpcService(User.create(ugi)); + callRpcService(); setRpcProtection("privacy,authentication", "privacy,authentication"); - callRpcService(User.create(ugi)); + callRpcService(); } @Test public void testSaslNoCommonQop() throws Exception { setRpcProtection("integrity", "privacy"); - SaslException se = assertThrows(SaslException.class, () -> callRpcService(User.create(ugi))); + SaslException se = assertThrows(SaslException.class, () -> callRpcService()); assertEquals("No common protection layer between client and server", se.getMessage()); } @@ -292,7 +352,7 @@ public void testSaslNoCommonQop() throws Exception { public void testSaslWithCryptoAES() throws Exception { setRpcProtection("privacy", "privacy"); setCryptoAES("true", "true"); - callRpcService(User.create(ugi)); + callRpcService(); } /** @@ -303,11 +363,11 @@ public void testDifferentConfWithCryptoAES() throws Exception { setRpcProtection("privacy", "privacy"); setCryptoAES("false", "true"); - callRpcService(User.create(ugi)); + callRpcService(); setCryptoAES("true", "false"); try { - callRpcService(User.create(ugi)); + callRpcService(); fail("The exception should be thrown out for the rpc timeout."); } catch (Exception e) { // ignore the expected exception @@ -323,7 +383,7 @@ private void setCryptoAES(String clientCryptoAES, String serverCryptoAES) { * Sets up a RPC Server and a Client. Does a RPC checks the result. If an exception is thrown from * the stub, this function will throw root cause of that exception. 
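The new tests above exercise both directions of the simple-auth fallback that this change repairs for the blocking client. For orientation, a minimal sketch of the two switches those tests toggle; both constants are taken from the test code, and everything else about the RPC setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;

final class SaslFallbackConfigExample {
  static void allowSimpleAuthFallback(Configuration clientConf, Configuration serverConf) {
    // Server side: accept connections from clients that only offer SIMPLE auth.
    serverConf.setBoolean(RpcServer.FALLBACK_TO_INSECURE_CLIENT_AUTH, true);
    // Client side: let a Kerberos-configured client downgrade when the server
    // answers with SIMPLE. When left false, the tests show the client fails
    // with FallbackDisallowedException instead of connecting.
    clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
  }
}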
*/ - private void callRpcService(User clientUser) throws Exception { + private void callRpcService(User serverUser, User clientUser) throws Exception { SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); Mockito.when(securityInfoMock.getServerPrincipal()) .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); @@ -331,10 +391,12 @@ private void callRpcService(User clientUser) throws Exception { InetSocketAddress isa = new InetSocketAddress(HOST, 0); - RpcServerInterface rpcServer = RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC", - Lists - .newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), - isa, serverConf, new FifoRpcScheduler(serverConf, 1)); + RpcServer rpcServer = serverUser.getUGI() + .doAs((PrivilegedExceptionAction< + RpcServer>) () -> RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC", + Lists.newArrayList( + new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), + isa, serverConf, new FifoRpcScheduler(serverConf, 1))); rpcServer.start(); try (RpcClient rpcClient = RpcClientFactory.createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())) { @@ -364,6 +426,14 @@ public void uncaughtException(Thread th, Throwable ex) { } } + private void callRpcService(User clientUser) throws Exception { + callRpcService(User.create(ugi), clientUser); + } + + private void callRpcService() throws Exception { + callRpcService(User.create(ugi)); + } + public static class TestThread extends Thread { private final BlockingInterface stub; From 7be588e0d46f3ae82d526d9625b926fc8b45bc2d Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 19 Feb 2024 15:32:00 -0500 Subject: [PATCH 257/514] HBASE-28370 Default user quotas are refreshing too frequently (#5686) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/quotas/QuotaCache.java | 12 ++- .../apache/hadoop/hbase/quotas/QuotaUtil.java | 6 +- .../hadoop/hbase/quotas/TestQuotaCache.java | 89 +++++++++++++++++++ .../hbase/quotas/ThrottleQuotaTestUtil.java | 12 +++ 4 files changed, 115 insertions(+), 4 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 67b2aecc5448..9b3498ff8947 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -71,6 +71,8 @@ public class QuotaCache implements Stoppable { // for testing purpose only, enforce the cache to be always refreshed static boolean TEST_FORCE_REFRESH = false; + // for testing purpose only, block cache refreshes to reliably verify state + static boolean TEST_BLOCK_REFRESH = false; private final ConcurrentMap namespaceQuotaCache = new ConcurrentHashMap<>(); private final ConcurrentMap tableQuotaCache = new ConcurrentHashMap<>(); @@ -138,7 +140,7 @@ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableNa */ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), - () -> QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration()), + () -> QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration(), 0L), this::triggerCacheRefresh); } @@ -239,6 +241,14 @@ public QuotaRefresherChore(final int period, final Stoppable stoppable) { 
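For context on the cache change above: when a user has no explicit quota, a default UserQuotaState is synthesized from configuration, and this patch threads the fetch timestamp into that default so the chore refreshes it on the normal period instead of on every run. A minimal sketch of enabling such configuration-driven defaults, mirroring what the new TestQuotaCache does; the sketch sits in the quotas package only so the default-quota keys resolve regardless of their declared visibility.

package org.apache.hadoop.hbase.quotas;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class DefaultUserQuotaConfigExample {
  static Configuration withDefaultUserReadLimit() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
    // Default per-user, per-RegionServer read limit applied when no explicit
    // quota exists for the user. With this patch the synthesized UserQuotaState
    // carries the fetch timestamp, so it is re-fetched on the configured
    // refresh period rather than on every chore run.
    conf.setInt(QuotaUtil.QUOTA_DEFAULT_USER_MACHINE_READ_NUM, 1000);
    return conf;
  }
}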
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "GC_UNRELATED_TYPES", justification = "I do not understand why the complaints, it looks good to me -- FIX") protected void chore() { + while (TEST_BLOCK_REFRESH) { + LOG.info("TEST_BLOCK_REFRESH=true, so blocking QuotaCache refresh until it is false"); + try { + Thread.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } // Prefetch online tables/namespaces for (TableName table : ((HRegionServer) QuotaCache.this.rsServices).getOnlineTables()) { if (table.isSystemTable()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 44357c88d2dc..0da1aa661658 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -334,7 +334,7 @@ public static Map fetchUserQuotas(final Connection conne String user = getUserFromRowKey(key); if (results[i].isEmpty()) { - userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration())); + userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration(), nowTs)); continue; } @@ -374,7 +374,7 @@ public void visitUserQuotas(String userName, Quotas quotas) { return userQuotas; } - protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) { + protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, long nowTs) { QuotaProtos.Throttle.Builder throttleBuilder = QuotaProtos.Throttle.newBuilder(); buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_NUM) @@ -390,7 +390,7 @@ protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) { buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_WRITE_SIZE) .ifPresent(throttleBuilder::setWriteSize); - UserQuotaState state = new UserQuotaState(); + UserQuotaState state = new UserQuotaState(nowTs); QuotaProtos.Quotas defaultQuotas = QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build(); state.setQuotas(defaultQuotas); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache.java new file mode 100644 index 000000000000..89c77f43b352 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.waitMinuteQuota; +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, MediumTests.class }) +public class TestQuotaCache { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestQuotaCache.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final int REFRESH_TIME = 30_000; + + @After + public void tearDown() throws Exception { + ThrottleQuotaTestUtil.clearQuotaCache(TEST_UTIL); + EnvironmentEdgeManager.reset(); + TEST_UTIL.shutdownMiniCluster(); + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME); + TEST_UTIL.getConfiguration().setInt(QuotaUtil.QUOTA_DEFAULT_USER_MACHINE_READ_NUM, 1000); + + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + } + + @Test + public void testDefaultUserRefreshFrequency() throws Exception { + QuotaCache.TEST_BLOCK_REFRESH = true; + + QuotaCache quotaCache = + ThrottleQuotaTestUtil.getQuotaCaches(TEST_UTIL).stream().findAny().get(); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + + UserQuotaState userQuotaState = quotaCache.getUserQuotaState(ugi); + assertEquals(userQuotaState.getLastUpdate(), 0); + + QuotaCache.TEST_BLOCK_REFRESH = false; + // new user should have refreshed immediately + TEST_UTIL.waitFor(5_000, () -> userQuotaState.getLastUpdate() != 0); + long lastUpdate = userQuotaState.getLastUpdate(); + + // refresh should not apply to recently refreshed quota + quotaCache.triggerCacheRefresh(); + Thread.sleep(250); + long newLastUpdate = userQuotaState.getLastUpdate(); + assertEquals(lastUpdate, newLastUpdate); + + quotaCache.triggerCacheRefresh(); + waitMinuteQuota(); + // should refresh after time has passed + TEST_UTIL.waitFor(5_000, () -> lastUpdate != userQuotaState.getLastUpdate()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java index bc2d0ae0713e..ff34c52386bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java @@ -19,9 +19,11 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Random; +import java.util.Set; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; @@ -283,6 +285,16 @@ public String explainFailure() throws Exception { } } + static Set getQuotaCaches(HBaseTestingUtil 
testUtil) { + Set quotaCaches = new HashSet<>(); + for (RegionServerThread rst : testUtil.getMiniHBaseCluster().getRegionServerThreads()) { + RegionServerRpcQuotaManager quotaManager = + rst.getRegionServer().getRegionServerRpcQuotaManager(); + quotaCaches.add(quotaManager.getQuotaCache()); + } + return quotaCaches; + } + static void waitMinuteQuota() { envEdge.incValue(70000); } From 8c989c92a67cc4373c1291f400aa372bca2d33b6 Mon Sep 17 00:00:00 2001 From: Ahmad Alhour Date: Fri, 23 Feb 2024 12:54:59 +0100 Subject: [PATCH 258/514] HBASE-28342 Decommissioned hosts should be rejected by the HMaster (#5681) Signed-off by: Nick Dimiduk --- .../org/apache/hadoop/hbase/HConstants.java | 14 +++ .../DecommissionedHostRejectedException.java | 28 +++++ .../apache/hadoop/hbase/master/HMaster.java | 4 +- .../hadoop/hbase/master/ServerManager.java | 109 ++++++++++++++---- .../hbase/regionserver/HRegionServer.java | 6 + .../TestRegionServerReportForDuty.java | 77 ++++++++++++- 6 files changed, 215 insertions(+), 23 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 5b53d2b2c0d3..9597ec23d81a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1622,6 +1622,20 @@ public enum OperationStatusCode { */ public final static boolean HBASE_SERVER_USEIP_ENABLED_DEFAULT = false; + /** + * Should the HMaster reject hosts of decommissioned RegionServers, bypass matching their port and + * startcode parts of their ServerName or not? When True, the HMaster will reject a RegionServer's + * request to `reportForDuty` if it's hostname exists in the list of decommissioned RegionServers + * it maintains internally. Added in HBASE-28342. + */ + public final static String REJECT_DECOMMISSIONED_HOSTS_KEY = + "hbase.master.reject.decommissioned.hosts"; + + /** + * Default value of {@link #REJECT_DECOMMISSIONED_HOSTS_KEY} + */ + public final static boolean REJECT_DECOMMISSIONED_HOSTS_DEFAULT = false; + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java new file mode 100644 index 000000000000..3d28b1e75be8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
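The new hbase.master.reject.decommissioned.hosts switch above builds on the existing decommission workflow: once a host is on the draining list, a RegionServer restarting on that host with a fresh port and startcode is rejected at reportForDuty instead of silently rejoining. A minimal operator-side sketch using the long-standing Admin decommission API; only the master-side flag is new in this patch, the Admin calls themselves are not.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

final class DecommissionWorkflowExample {
  static void drainAndLaterRestore(Admin admin, List<ServerName> servers) throws IOException {
    // Marks the servers as draining and (with offload=true) moves their regions away.
    // With hbase.master.reject.decommissioned.hosts=true, these hostnames can no
    // longer report for duty until they are recommissioned.
    admin.decommissionRegionServers(servers, true);

    // Later: allow the hosts back in.
    for (ServerName sn : admin.listDecommissionedRegionServers()) {
      admin.recommissionRegionServer(sn, Collections.emptyList());
    }
  }
}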
+ */ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class DecommissionedHostRejectedException extends HBaseIOException { + public DecommissionedHostRejectedException(String message) { + super(message); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 88b82f01069e..ddef3e27b405 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -546,7 +546,6 @@ public HMaster(final Configuration conf) throws IOException { HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); // Do we publish the status? - boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); Class publisherClass = @@ -997,7 +996,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE masterRegion = MasterRegionFactory.create(this); rsListStorage = new MasterRegionServerList(masterRegion, this); + // Initialize the ServerManager and register it as a configuration observer this.serverManager = createServerManager(this, rsListStorage); + this.configurationManager.registerObserver(this.serverManager); + this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this); if ( !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 2afd48c58df5..a2ed4da53e39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; @@ -100,7 +102,7 @@ * only after the handler is fully enabled and has completed the handling. */ @InterfaceAudience.Private -public class ServerManager { +public class ServerManager implements ConfigurationObserver { public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART = "hbase.master.wait.on.regionservers.maxtostart"; @@ -172,6 +174,9 @@ public class ServerManager { /** Listeners that are called on server events. */ private List listeners = new CopyOnWriteArrayList<>(); + /** Configured value of HConstants.REJECT_DECOMMISSIONED_HOSTS_KEY */ + private volatile boolean rejectDecommissionedHostsConfig; + /** * Constructor. 
*/ @@ -183,6 +188,35 @@ public ServerManager(final MasterServices master, RegionServerList storage) { warningSkew = c.getLong("hbase.master.warningclockskew", 10000); persistFlushedSequenceId = c.getBoolean(PERSIST_FLUSHEDSEQUENCEID, PERSIST_FLUSHEDSEQUENCEID_DEFAULT); + rejectDecommissionedHostsConfig = getRejectDecommissionedHostsConfig(c); + } + + /** + * Implementation of the ConfigurationObserver interface. We are interested in live-loading the + * configuration value of HConstants.REJECT_DECOMMISSIONED_HOSTS_KEY + * @param conf Server configuration instance + */ + @Override + public void onConfigurationChange(Configuration conf) { + final boolean newValue = getRejectDecommissionedHostsConfig(conf); + if (rejectDecommissionedHostsConfig == newValue) { + // no-op + return; + } + + LOG.info("Config Reload for RejectDecommissionedHosts. previous value: {}, new value: {}", + rejectDecommissionedHostsConfig, newValue); + + rejectDecommissionedHostsConfig = newValue; + } + + /** + * Reads the value of HConstants.REJECT_DECOMMISSIONED_HOSTS_KEY from the config and returns it + * @param conf Configuration instance of the Master + */ + public boolean getRejectDecommissionedHostsConfig(Configuration conf) { + return conf.getBoolean(HConstants.REJECT_DECOMMISSIONED_HOSTS_KEY, + HConstants.REJECT_DECOMMISSIONED_HOSTS_DEFAULT); } /** @@ -227,11 +261,14 @@ ServerName regionServerStartup(RegionServerStartupRequest request, int versionNu final String hostname = request.hasUseThisHostnameInstead() ? request.getUseThisHostnameInstead() : isaHostName; ServerName sn = ServerName.valueOf(hostname, request.getPort(), request.getServerStartCode()); + + // Check if the host should be rejected based on it's decommissioned status + checkRejectableDecommissionedStatus(sn); + checkClockSkew(sn, request.getServerCurrentTime()); checkIsDead(sn, "STARTUP"); if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn, versionNumber, version))) { - LOG.warn( - "THIS SHOULD NOT HAPPEN, RegionServerStartup" + " could not record the server: " + sn); + LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup could not record the server: {}", sn); } storage.started(sn); return sn; @@ -293,6 +330,42 @@ public void regionServerReport(ServerName sn, ServerMetrics sl) throws YouAreDea updateLastFlushedSequenceIds(sn, sl); } + /** + * Checks if the Master is configured to reject decommissioned hosts or not. When it's configured + * to do so, any RegionServer trying to join the cluster will have it's host checked against the + * list of hosts of currently decommissioned servers and potentially get prevented from reporting + * for duty; otherwise, we do nothing and we let them pass to the next check. See HBASE-28342 for + * details. + * @param sn The ServerName to check for + * @throws DecommissionedHostRejectedException if the Master is configured to reject + * decommissioned hosts and this host exists in the + * list of the decommissioned servers + */ + private void checkRejectableDecommissionedStatus(ServerName sn) + throws DecommissionedHostRejectedException { + LOG.info("Checking decommissioned status of RegionServer {}", sn.getServerName()); + + // If the Master is not configured to reject decommissioned hosts, return early. 
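The check that follows matches on hostname alone, deliberately ignoring the port and startcode components of ServerName, so a RegionServer restarted on a decommissioned host is still turned away. A condensed sketch of that matching rule, using a hypothetical helper name and standard java.util imports:

    private static boolean isHostDecommissioned(ServerName joining,
        Collection<ServerName> decommissioned) {
      // Hostname-only comparison: a new port or startcode does not bypass the check
      return decommissioned.stream()
          .anyMatch(s -> Objects.equals(s.getHostname(), joining.getHostname()));
    }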
+ if (!rejectDecommissionedHostsConfig) { + return; + } + + // Look for a match for the hostname in the list of decommissioned servers + for (ServerName server : getDrainingServersList()) { + if (Objects.equals(server.getHostname(), sn.getHostname())) { + // Found a match and master is configured to reject decommissioned hosts, throw exception! + LOG.warn( + "Rejecting RegionServer {} from reporting for duty because Master is configured " + + "to reject decommissioned hosts and this host was marked as such in the past.", + sn.getServerName()); + throw new DecommissionedHostRejectedException(String.format( + "Host %s exists in the list of decommissioned servers and Master is configured to " + + "reject decommissioned hosts", + sn.getHostname())); + } + } + } + /** * Check is a server of same host and port already exists, if not, or the existed one got a * smaller start code, record it. @@ -647,13 +720,8 @@ public synchronized void moveFromOnlineToDeadServers(final ServerName sn) { * Remove the server from the drain list. */ public synchronized boolean removeServerFromDrainList(final ServerName sn) { - // Warn if the server (sn) is not online. ServerName is of the form: - // , , + LOG.info("Removing server {} from the draining list.", sn); - if (!this.isServerOnline(sn)) { - LOG.warn("Server " + sn + " is not currently online. " - + "Removing from draining list anyway, as requested."); - } // Remove the server from the draining servers lists. return this.drainingServers.remove(sn); } @@ -663,22 +731,23 @@ public synchronized boolean removeServerFromDrainList(final ServerName sn) { * @return True if the server is added or the server is already on the drain list. */ public synchronized boolean addServerToDrainList(final ServerName sn) { - // Warn if the server (sn) is not online. ServerName is of the form: - // , , - - if (!this.isServerOnline(sn)) { - LOG.warn("Server " + sn + " is not currently online. " - + "Ignoring request to add it to draining list."); + // If master is not rejecting decommissioned hosts, warn if the server (sn) is not online. + // However, we want to add servers even if they're not online if the master is configured + // to reject decommissioned hosts + if (!rejectDecommissionedHostsConfig && !this.isServerOnline(sn)) { + LOG.warn("Server {} is not currently online. Ignoring request to add it to draining list.", + sn); return false; } - // Add the server to the draining servers lists, if it's not already in - // it. + + // Add the server to the draining servers lists, if it's not already in it. if (this.drainingServers.contains(sn)) { - LOG.warn("Server " + sn + " is already in the draining server list." - + "Ignoring request to add it again."); + LOG.warn( + "Server {} is already in the draining server list. 
Ignoring request to add it again.", sn); return true; } - LOG.info("Server " + sn + " added to draining server list."); + + LOG.info("Server {} added to draining server list.", sn); return this.drainingServers.add(sn); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index dfb8e2a204fe..c71859ee6c1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.log.HBaseMarkers; +import org.apache.hadoop.hbase.master.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -2664,6 +2665,11 @@ private RegionServerStartupResponse reportForDuty() throws IOException { LOG.error(HBaseMarkers.FATAL, "Master rejected startup because clock is out of sync", ioe); // Re-throw IOE will cause RS to abort throw ioe; + } else if (ioe instanceof DecommissionedHostRejectedException) { + LOG.error(HBaseMarkers.FATAL, + "Master rejected startup because the host is considered decommissioned", ioe); + // Re-throw IOE will cause RS to abort + throw ioe; } else if (ioe instanceof ServerNotRunningYetException) { LOG.debug("Master is not running yet"); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java index 4917d6f5aefa..b408229f59fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -17,11 +17,16 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.*; import java.io.IOException; import java.io.StringWriter; +import java.util.Arrays; +import java.util.Collections; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.StringUtils; @@ -30,9 +35,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LocalHBaseCluster; +import org.apache.hadoop.hbase.MatcherPredicate; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -241,6 +248,72 @@ public void run() { waitForClusterOnline(master); } + /** + * Tests that the RegionServer's reportForDuty gets rejected by the master when the master is + * 
configured to reject decommissioned hosts and when there is a match for the joining + * RegionServer in the list of decommissioned servers. Test case for HBASE-28342. + */ + @Test + public void testReportForDutyGetsRejectedByMasterWhenConfiguredToRejectDecommissionedHosts() + throws Exception { + // Start a master and wait for it to become the active/primary master. + // Use a random unique port + cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtil.randomFreePort()); + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 1); + + // Set the cluster to reject decommissioned hosts + cluster.getConfiguration().setBoolean(HConstants.REJECT_DECOMMISSIONED_HOSTS_KEY, true); + + master = cluster.addMaster(); + rs = cluster.addRegionServer(); + master.start(); + rs.start(); + waitForClusterOnline(master); + + // Add a second decommissioned region server to the cluster, wait for it to fail reportForDuty + LogCapturer capturer = + new LogCapturer((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager + .getLogger(HRegionServer.class)); + + rs2 = cluster.addRegionServer(); + master.getMaster().decommissionRegionServers( + Collections.singletonList(rs2.getRegionServer().getServerName()), false); + rs2.start(); + + // Assert that the second regionserver has aborted + testUtil.waitFor(TimeUnit.SECONDS.toMillis(90), + new MatcherPredicate<>(() -> rs2.getRegionServer().isAborted(), is(true))); + + // Assert that the log messages for DecommissionedHostRejectedException exist in the logs + capturer.stopCapturing(); + + assertThat(capturer.getOutput(), + containsString("Master rejected startup because the host is considered decommissioned")); + + /** + * Assert that the following log message occurred (one line): + * "org.apache.hadoop.hbase.master.DecommissionedHostRejectedException: + * org.apache.hadoop.hbase.master.DecommissionedHostRejectedException: Host localhost exists in + * the list of decommissioned servers and Master is configured to reject decommissioned hosts" + */ + assertThat(Arrays.asList(capturer.getOutput().split("\n")), + hasItem(allOf(containsString(DecommissionedHostRejectedException.class.getSimpleName()), + containsString(DecommissionedHostRejectedException.class.getSimpleName()), + containsString("Host " + rs2.getRegionServer().getServerName().getHostname() + + " exists in the list of decommissioned servers and Master is configured to reject" + + " decommissioned hosts")))); + + assertThat(Arrays.asList(capturer.getOutput().split("\n")), + hasItem( + allOf(containsString("ABORTING region server " + rs2.getRegionServer().getServerName()), + containsString("Unhandled"), + containsString(DecommissionedHostRejectedException.class.getSimpleName()), + containsString("Host " + rs2.getRegionServer().getServerName().getHostname() + + " exists in the list of decommissioned servers and Master is configured to reject" + + " decommissioned hosts")))); + } + /** * Tests region sever reportForDuty with a non-default environment edge */ From 0ea11f2fc3a526b24c82248db0346d28bf20d12c Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 23 Feb 2024 12:29:45 -0500 Subject: [PATCH 259/514] HBASE-28390 WAL value compression fails for cells with large values (#5696) Signed-off-by: Andrew Purtell --- .../aircompressor/TestWALCompressionLz4.java | 13 -------- .../aircompressor/TestWALCompressionLzo.java | 14 -------- 
.../TestWALCompressionSnappy.java | 13 -------- .../aircompressor/TestWALCompressionZstd.java | 13 -------- .../brotli/TestWALCompressionBrotli.java | 13 -------- .../compress/lz4/TestWALCompressionLz4.java | 14 -------- .../xerial/TestWALCompressionSnappy.java | 13 -------- .../compress/xz/TestWALCompressionLzma.java | 13 -------- .../compress/zstd/TestWALCompressionZstd.java | 14 -------- .../regionserver/wal/CompressionContext.java | 17 ++++++++++ ...mpressionBoundedDelegatingInputStream.java | 16 ++++++---- .../hbase/wal/CompressedWALTestBase.java | 32 +++++++++++++++++-- .../hadoop/hbase/wal/TestCompressedWAL.java | 13 -------- .../TestCompressedWALValueCompression.java | 13 -------- 14 files changed, 55 insertions(+), 156 deletions(-) diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java index 34a7dcfedfc1..1361754189a1 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionLz4 extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionLz4.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -60,10 +53,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java index 9c5bc8838c07..dd91b0cac025 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionLzo extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionLzo extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionLzo.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -59,11 +52,4 @@ public static void setUpBeforeClass() throws Exception { public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java index 72813bcbd656..93345b924fa0 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionSnappy extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -60,10 +53,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java 
b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java index 0f5c80ce269b..3b367a2db9bb 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionZstd extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionZstd.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -60,10 +53,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java index e37276fed6db..3539c4280d99 100644 --- a/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java +++ b/hbase-compression/hbase-compression-brotli/src/test/java/org/apache/hadoop/hbase/io/compress/brotli/TestWALCompressionBrotli.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionBrotli extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionBrotli extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionBrotli.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration 
conf = TEST_UTIL.getConfiguration(); @@ -60,10 +53,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java index 81b5d943dc6d..b79fe094de06 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionLz4 extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionLz4 extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionLz4.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -59,11 +52,4 @@ public static void setUpBeforeClass() throws Exception { public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java index 7a2bbfe7b872..bb6dac1e4529 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java @@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -31,10 +30,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ 
RegionServerTests.class, MediumTests.class }) public class TestWALCompressionSnappy extends CompressedWALTestBase { @@ -43,9 +39,6 @@ public class TestWALCompressionSnappy extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionSnappy.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { assumeTrue(SnappyCodec.isLoaded()); @@ -63,10 +56,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java index ee937230cd26..aa74926cb819 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionLzma extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionLzma extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionLzma.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -60,10 +53,4 @@ public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java index 55d61cf83ec5..23a37ab86594 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import 
org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALCompressionZstd extends CompressedWALTestBase { @@ -41,9 +37,6 @@ public class TestWALCompressionZstd extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALCompressionZstd.class); - @Rule - public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -59,11 +52,4 @@ public static void setUpBeforeClass() throws Exception { public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java index 633decab0a82..2481753dfb06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.ByteArrayOutputStream; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -129,9 +130,25 @@ public void decompress(InputStream in, int inLength, byte[] outArray, int outOff } else { lowerIn.reset(in, inLength); IOUtils.readFully(compressedIn, outArray, outOffset, outLength); + // if the uncompressed size was larger than the configured buffer size for the codec, + // the BlockCompressorStream will have left an extra 4 bytes hanging. This represents a size + // for the next segment, and it should be 0. 
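The helper added a few lines below reads those four bytes as one big-endian int, the same value java.io.DataInputStream#readInt would produce (both fail with EOFException if fewer than four bytes remain); the patch keeps a small local method rather than wrapping the stream, but an equivalent sketch for reference is:

    // Assumes lowerIn still holds exactly the trailing 4-byte next-segment length
    int remaining = new DataInputStream(lowerIn).readInt();
    assert remaining == 0;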
See HBASE-28390 + if (lowerIn.available() == 4) { + int remaining = rawReadInt(lowerIn); + assert remaining == 0; + } } } + private int rawReadInt(InputStream in) throws IOException { + int b1 = in.read(); + int b2 = in.read(); + int b3 = in.read(); + int b4 = in.read(); + if ((b1 | b2 | b3 | b4) < 0) throw new EOFException(); + return ((b1 << 24) + (b2 << 16) + (b3 << 8) + (b4 << 0)); + } + public void clear() { if (compressedOut != null) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java index 0f4fd78a0b83..5d876b97aa81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALDecompressionBoundedDelegatingInputStream.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver.wal; -import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import org.apache.commons.io.IOUtils; @@ -67,19 +66,22 @@ public int read(byte[] b, int off, int len) throws IOException { if (pos >= limit) { return -1; } - int readLen = (int) Math.min(len, limit - pos); - try { - IOUtils.readFully(in, b, off, readLen); - } catch (EOFException e) { + int toRead = (int) Math.min(len, limit - pos); + int readBytes = IOUtils.read(in, b, off, toRead); + // increase pos by however many we actually read + pos += readBytes; + + if (readBytes != toRead) { // This is trick here, we will always try to read enough bytes to fill the buffer passed in, // or we reach the end of this compression block, if there are not enough bytes, we just // return -1 to let the upper layer fail with EOF // In WAL value decompression this is OK as if we can not read all the data, we will finally // get an EOF somewhere - LOG.debug("Got EOF while we want to read {} bytes from stream", readLen, e); + LOG.debug("Got EOF while we want to read {} bytes from stream, but only read {}", toRead, + readBytes); return -1; } - return readLen; + return toRead; } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java index 751e4dc88b38..2ff9223e74b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java @@ -36,9 +36,13 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @SuppressWarnings("checkstyle:innerassignment") -public class CompressedWALTestBase { +public abstract class CompressedWALTestBase { + private static final Logger LOG = LoggerFactory.getLogger(CompressedWALTestBase.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -67,14 +71,36 @@ public class CompressedWALTestBase { Arrays.fill(VALUE, off, (off += 1597), (byte) 'Q'); } - public void doTest(TableName tableName) throws Exception { + @Test + public void test() throws Exception { + testForSize(1000); + } + + @Test + public void testLarge() throws Exception { + testForSize(1024 * 1024); + } + + private void 
testForSize(int size) throws Exception { + TableName tableName = TableName.valueOf(getClass().getSimpleName() + "_testForSize_" + size); + doTest(tableName, size); + } + + public void doTest(TableName tableName, int valueSize) throws Exception { NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(tableName.getName(), 0); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); final int total = 1000; final byte[] row = Bytes.toBytes("row"); final byte[] family = Bytes.toBytes("family"); - final byte[] value = VALUE; + final byte[] value = new byte[valueSize]; + + int offset = 0; + while (offset + VALUE.length < value.length) { + System.arraycopy(VALUE, 0, value, offset, VALUE.length); + offset += VALUE.length; + } + final WALFactory wals = new WALFactory(TEST_UTIL.getConfiguration(), tableName.getNameAsString()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java index d12756507932..faf244b5db79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java @@ -20,16 +20,12 @@ import java.util.Arrays; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameter; @@ -43,9 +39,6 @@ public class TestCompressedWAL extends CompressedWALTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCompressedWAL.class); - @Rule - public TestName name = new TestName(); - @Parameter public String walProvider; @@ -66,10 +59,4 @@ public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java index ca4c8dd8c53a..598fd9f7a9ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -29,10 +28,7 @@ import org.junit.After; import org.junit.Before; import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.junit.runner.RunWith; import 
org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; @@ -50,9 +46,6 @@ public static List params() { return HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS_PARAMETERIZED; } - @Rule - public TestName name = new TestName(); - private final Compression.Algorithm compression; public TestCompressedWALValueCompression(Compression.Algorithm algo) { @@ -72,10 +65,4 @@ public void setUp() throws Exception { public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - - @Test - public void test() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); - doTest(tableName); - } } From 63e3a43041cf89ec7c18962380c769f9a253e8d0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 24 Feb 2024 16:20:37 +0800 Subject: [PATCH 260/514] HBASE-28398 Make sure we close all the scanners in TestHRegion (#5701) Signed-off-by: Bryan Beaudreault Signed-off-by: Rajeshbabu Chintaguntla --- .../hbase/regionserver/TestHRegion.java | 1080 ++++++++--------- 1 file changed, 539 insertions(+), 541 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 6a1c285bf8db..d244ca767be2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -287,21 +287,26 @@ public void testSequenceId() throws IOException { HBaseTestingUtil.closeRegionAndWAL(this.region); assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); - // Open region again. - region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES); - byte[] value = Bytes.toBytes(method); - // Make a random put against our cf. - Put put = new Put(value); - put.addColumn(COLUMN_FAMILY_BYTES, null, value); - region.put(put); - // No flush yet so init numbers should still be in place. - assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); - assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); - region.flush(true); - long max = region.getMaxFlushedSeqId(); - HBaseTestingUtil.closeRegionAndWAL(this.region); - assertEquals(max, region.getMaxFlushedSeqId()); - this.region = null; + HRegion oldRegion = region; + try { + // Open region again. + region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES); + byte[] value = Bytes.toBytes(method); + // Make a random put against our cf. + Put put = new Put(value); + put.addColumn(COLUMN_FAMILY_BYTES, null, value); + region.put(put); + // No flush yet so init numbers should still be in place. 
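Most of the hunks in this patch convert bare getScanner()/close() call sites into try-with-resources blocks, which is what guarantees a scanner is closed even when an assertion throws partway through a test. A minimal sketch of the pattern with a hypothetical empty Scan; RegionScanner extends InternalScanner, which is Closeable, so it can sit directly in the resource clause:

    try (RegionScanner scanner = region.getScanner(new Scan())) {
      List<Cell> cells = new ArrayList<>();
      boolean more;
      do {
        more = scanner.next(cells);
        // ... assertions against cells go here ...
        cells.clear();
      } while (more);
    } // close() runs here even if an assertion above failed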
+ assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); + assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); + region.flush(true); + long max = region.getMaxFlushedSeqId(); + HBaseTestingUtil.closeRegionAndWAL(this.region); + assertEquals(max, region.getMaxFlushedSeqId()); + this.region = null; + } finally { + HBaseTestingUtil.closeRegionAndWAL(oldRegion); + } } /** @@ -615,39 +620,38 @@ public void testCompactionAffectedByScanners() throws Exception { Scan scan = new Scan(); scan.readVersions(3); // open the first scanner - RegionScanner scanner1 = region.getScanner(scan); - - Delete delete = new Delete(Bytes.toBytes("r1")); - region.delete(delete); - region.flush(true); - - // open the second scanner - RegionScanner scanner2 = region.getScanner(scan); - - List results = new ArrayList<>(); - - System.out.println("Smallest read point:" + region.getSmallestReadPoint()); + try (RegionScanner scanner1 = region.getScanner(scan)) { + Delete delete = new Delete(Bytes.toBytes("r1")); + region.delete(delete); + region.flush(true); + // open the second scanner + try (RegionScanner scanner2 = region.getScanner(scan)) { + List results = new ArrayList<>(); - // make a major compaction - region.compact(true); + LOG.info("Smallest read point:" + region.getSmallestReadPoint()); - // open the third scanner - RegionScanner scanner3 = region.getScanner(scan); + // make a major compaction + region.compact(true); - // get data from scanner 1, 2, 3 after major compaction - scanner1.next(results); - System.out.println(results); - assertEquals(1, results.size()); + // open the third scanner + try (RegionScanner scanner3 = region.getScanner(scan)) { + // get data from scanner 1, 2, 3 after major compaction + scanner1.next(results); + LOG.info(results.toString()); + assertEquals(1, results.size()); - results.clear(); - scanner2.next(results); - System.out.println(results); - assertEquals(0, results.size()); + results.clear(); + scanner2.next(results); + LOG.info(results.toString()); + assertEquals(0, results.size()); - results.clear(); - scanner3.next(results); - System.out.println(results); - assertEquals(0, results.size()); + results.clear(); + scanner3.next(results); + LOG.info(results.toString()); + assertEquals(0, results.size()); + } + } + } } @Test @@ -666,18 +670,18 @@ public void testToShowNPEOnRegionScannerReseek() throws Exception { Scan scan = new Scan(); scan.readVersions(3); // open the first scanner - RegionScanner scanner1 = region.getScanner(scan); - - System.out.println("Smallest read point:" + region.getSmallestReadPoint()); + try (RegionScanner scanner1 = region.getScanner(scan)) { + LOG.info("Smallest read point:" + region.getSmallestReadPoint()); - region.compact(true); + region.compact(true); - scanner1.reseek(Bytes.toBytes("r2")); - List results = new ArrayList<>(); - scanner1.next(results); - Cell keyValue = results.get(0); - Assert.assertTrue(Bytes.compareTo(CellUtil.cloneRow(keyValue), Bytes.toBytes("r2")) == 0); - scanner1.close(); + scanner1.reseek(Bytes.toBytes("r2")); + List results = new ArrayList<>(); + scanner1.next(results); + Cell keyValue = results.get(0); + assertTrue(Bytes.compareTo(CellUtil.cloneRow(keyValue), Bytes.toBytes("r2")) == 0); + scanner1.close(); + } } @Test @@ -1462,37 +1466,45 @@ public void testIncrWithReadOnlyTable() throws Exception { } private void deleteColumns(HRegion r, String value, String keyPrefix) throws IOException { - InternalScanner scanner = buildScanner(keyPrefix, value, r); int count = 0; - boolean more = 
false; - List results = new ArrayList<>(); - do { - more = scanner.next(results); - if (results != null && !results.isEmpty()) count++; - else break; - Delete delete = new Delete(CellUtil.cloneRow(results.get(0))); - delete.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2")); - r.delete(delete); - results.clear(); - } while (more); + try (InternalScanner scanner = buildScanner(keyPrefix, value, r)) { + boolean more = false; + List results = new ArrayList<>(); + do { + more = scanner.next(results); + if (results != null && !results.isEmpty()) { + count++; + } else { + break; + } + Delete delete = new Delete(CellUtil.cloneRow(results.get(0))); + delete.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2")); + r.delete(delete); + results.clear(); + } while (more); + } assertEquals("Did not perform correct number of deletes", 3, count); } private int getNumberOfRows(String keyPrefix, String value, HRegion r) throws Exception { - InternalScanner resultScanner = buildScanner(keyPrefix, value, r); - int numberOfResults = 0; - List results = new ArrayList<>(); - boolean more = false; - do { - more = resultScanner.next(results); - if (results != null && !results.isEmpty()) numberOfResults++; - else break; - for (Cell kv : results) { - System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv))); - } - results.clear(); - } while (more); - return numberOfResults; + try (InternalScanner resultScanner = buildScanner(keyPrefix, value, r)) { + int numberOfResults = 0; + List results = new ArrayList<>(); + boolean more = false; + do { + more = resultScanner.next(results); + if (results != null && !results.isEmpty()) { + numberOfResults++; + } else { + break; + } + for (Cell kv : results) { + LOG.info("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv))); + } + results.clear(); + } while (more); + return numberOfResults; + } } private InternalScanner buildScanner(String keyPrefix, String value, HRegion r) @@ -3322,14 +3334,15 @@ public void testScanner_DeleteOneFamilyNotAnother() throws IOException { Scan scan = new Scan(); scan.addFamily(fam1).addFamily(fam2); - InternalScanner s = region.getScanner(scan); - List results = new ArrayList<>(); - s.next(results); - assertTrue(CellUtil.matchingRows(results.get(0), rowA)); + try (InternalScanner s = region.getScanner(scan)) { + List results = new ArrayList<>(); + s.next(results); + assertTrue(CellUtil.matchingRows(results.get(0), rowA)); - results.clear(); - s.next(results); - assertTrue(CellUtil.matchingRows(results.get(0), rowB)); + results.clear(); + s.next(results); + assertTrue(CellUtil.matchingRows(results.get(0), rowB)); + } } @Test @@ -3453,17 +3466,17 @@ public void doTestDelete_AndPostInsert(Delete delete) throws IOException, Interr // next: Scan scan = new Scan().withStartRow(row); scan.addColumn(fam1, qual1); - InternalScanner s = region.getScanner(scan); - - List results = new ArrayList<>(); - assertEquals(false, s.next(results)); - assertEquals(1, results.size()); - Cell kv = results.get(0); + try (InternalScanner s = region.getScanner(scan)) { + List results = new ArrayList<>(); + assertEquals(false, s.next(results)); + assertEquals(1, results.size()); + Cell kv = results.get(0); - assertArrayEquals(value2, CellUtil.cloneValue(kv)); - assertArrayEquals(fam1, CellUtil.cloneFamily(kv)); - assertArrayEquals(qual1, CellUtil.cloneQualifier(kv)); - assertArrayEquals(row, CellUtil.cloneRow(kv)); + assertArrayEquals(value2, CellUtil.cloneValue(kv)); + assertArrayEquals(fam1, 
CellUtil.cloneFamily(kv)); + assertArrayEquals(qual1, CellUtil.cloneQualifier(kv)); + assertArrayEquals(row, CellUtil.cloneRow(kv)); + } } @Test @@ -3657,11 +3670,7 @@ public void testGetScanner_WithOkFamilies() throws IOException { Scan scan = new Scan(); scan.addFamily(fam1); scan.addFamily(fam2); - try { - region.getScanner(scan); - } catch (Exception e) { - assertTrue("Families could not be found in Region", false); - } + region.getScanner(scan).close(); } @Test @@ -3675,13 +3684,7 @@ public void testGetScanner_WithNotOkFamilies() throws IOException { this.region = initHRegion(tableName, method, CONF, families); Scan scan = new Scan(); scan.addFamily(fam2); - boolean ok = false; - try { - region.getScanner(scan); - } catch (Exception e) { - ok = true; - } - assertTrue("Families could not be found in Region", ok); + assertThrows(NoSuchColumnFamilyException.class, () -> region.getScanner(scan)); } @Test @@ -3705,20 +3708,20 @@ public void testGetScanner_WithNoFamilies() throws IOException { region.put(put); Scan scan = null; - RegionScannerImpl is = null; - // Testing to see how many scanners that is produced by getScanner, - // starting - // with known number, 2 - current = 1 + // Testing to see how many scanners that is produced by getScanner, starting with known number, + // 2 - current = 1 scan = new Scan(); scan.addFamily(fam2); scan.addFamily(fam4); - is = region.getScanner(scan); - assertEquals(1, is.storeHeap.getHeap().size()); + try (RegionScannerImpl is = region.getScanner(scan)) { + assertEquals(1, is.storeHeap.getHeap().size()); + } scan = new Scan(); - is = region.getScanner(scan); - assertEquals(families.length - 1, is.storeHeap.getHeap().size()); + try (RegionScannerImpl is = region.getScanner(scan)) { + assertEquals(families.length - 1, is.storeHeap.getHeap().size()); + } } /** @@ -3739,15 +3742,7 @@ public void testGetScanner_WithRegionClosed() throws IOException { fail("Got IOException during initHRegion, " + e.getMessage()); } region.closed.set(true); - try { - region.getScanner(null); - fail("Expected to get an exception during getScanner on a region that is closed"); - } catch (NotServingRegionException e) { - // this is the correct exception that is expected - } catch (IOException e) { - fail("Got wrong type of exception - should be a NotServingRegionException, " - + "but was an IOException: " + e.getMessage()); - } + assertThrows(NotServingRegionException.class, () -> region.getScanner(null)); } @Test @@ -3783,30 +3778,30 @@ public void testRegionScanner_Next() throws IOException { Scan scan = new Scan(); scan.addFamily(fam2); scan.addFamily(fam4); - InternalScanner is = region.getScanner(scan); - - List res = null; - - // Result 1 - List expected1 = new ArrayList<>(); - expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null)); - expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); - - res = new ArrayList<>(); - is.next(res); - for (int i = 0; i < res.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); - } + try (InternalScanner is = region.getScanner(scan)) { + List res = null; + + // Result 1 + List expected1 = new ArrayList<>(); + expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null)); + expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); + + res = new ArrayList<>(); + is.next(res); + for (int i = 0; i < res.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); + } - // Result 2 - List 
expected2 = new ArrayList<>(); - expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null)); - expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); + // Result 2 + List expected2 = new ArrayList<>(); + expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null)); + expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); - res = new ArrayList<>(); - is.next(res); - for (int i = 0; i < res.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); + res = new ArrayList<>(); + is.next(res); + for (int i = 0; i < res.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); + } } } @@ -3852,14 +3847,14 @@ public void testScanner_ExplicitColumns_FromMemStore_EnforceVersions() throws IO scan.addColumn(fam1, qf1); scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); - - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertEquals(expected.get(i), actual.get(i)); + // Verify result + for (int i = 0; i < expected.size(); i++) { + assertEquals(expected.get(i), actual.get(i)); + } } } @@ -3909,14 +3904,14 @@ public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws I scan.addColumn(fam1, qf2); scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); - - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + // Verify result + for (int i = 0; i < expected.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + } } } @@ -3986,14 +3981,14 @@ public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() int versions = 3; scan.readVersions(versions); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); - - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + // Verify result + for (int i = 0; i < expected.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + } } } @@ -4041,14 +4036,14 @@ public void testScanner_Wildcard_FromMemStore_EnforceVersions() throws IOExcepti scan.addFamily(fam1); scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); - - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertEquals(expected.get(i), actual.get(i)); + // Verify result 
+ for (int i = 0; i < expected.size(); i++) { + assertEquals(expected.get(i), actual.get(i)); + } } } @@ -4096,14 +4091,14 @@ public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOExcept scan.addFamily(fam1); scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); - - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + // Verify result + for (int i = 0; i < expected.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + } } } @@ -4143,11 +4138,11 @@ public void testScanner_StopRow1542() throws IOException { Scan scan = new Scan().withStartRow(row3).withStopRow(row4); scan.readAllVersions(); scan.addColumn(family, col1); - InternalScanner s = region.getScanner(scan); - - List results = new ArrayList<>(); - assertEquals(false, s.next(results)); - assertEquals(0, results.size()); + try (InternalScanner s = region.getScanner(scan)) { + List results = new ArrayList<>(); + assertEquals(false, s.next(results)); + assertEquals(0, results.size()); + } } @Test @@ -4212,14 +4207,14 @@ public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws I int versions = 3; scan.readVersions(versions); List actual = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); - - boolean hasNext = scanner.next(actual); - assertEquals(false, hasNext); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(actual); + assertEquals(false, hasNext); - // Verify result - for (int i = 0; i < expected.size(); i++) { - assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + // Verify result + for (int i = 0; i < expected.size(); i++) { + assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i))); + } } } @@ -4267,22 +4262,22 @@ public void testScanner_JoinedScanners() throws IOException { CompareOperator.NOT_EQUAL, filtered_val); scan.setFilter(filter); scan.setLoadColumnFamiliesOnDemand(true); - InternalScanner s = region.getScanner(scan); - - List results = new ArrayList<>(); - assertTrue(s.next(results)); - assertEquals(1, results.size()); - results.clear(); + try (InternalScanner s = region.getScanner(scan)) { + List results = new ArrayList<>(); + assertTrue(s.next(results)); + assertEquals(1, results.size()); + results.clear(); - assertTrue(s.next(results)); - assertEquals(3, results.size()); - assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha)); - assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential)); - assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined)); - results.clear(); + assertTrue(s.next(results)); + assertEquals(3, results.size()); + assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha)); + assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential)); + assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined)); + results.clear(); - assertFalse(s.next(results)); - assertEquals(0, results.size()); + assertFalse(s.next(results)); + assertEquals(0, results.size()); + } } /** @@ -4325,55 +4320,55 @@ public boolean 
isFamilyEssential(byte[] name) { }; scan.setFilter(bogusFilter); - InternalScanner s = region.getScanner(scan); - - // Our data looks like this: - // r0: first:a, first:b, second:a, second:b - // r1: first:a, first:b, second:a, second:b - // r2: first:a, first:b, second:a, second:b - // r3: first:a, first:b, second:a, second:b - // r4: first:a, first:b, second:a, second:b - // r5: first:a - // r6: first:a - // r7: first:a - // r8: first:a - // r9: first:a - - // But due to next's limit set to 3, we should get this: - // r0: first:a, first:b, second:a - // r0: second:b - // r1: first:a, first:b, second:a - // r1: second:b - // r2: first:a, first:b, second:a - // r2: second:b - // r3: first:a, first:b, second:a - // r3: second:b - // r4: first:a, first:b, second:a - // r4: second:b - // r5: first:a - // r6: first:a - // r7: first:a - // r8: first:a - // r9: first:a - - List results = new ArrayList<>(); - int index = 0; - ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build(); - while (true) { - boolean more = s.next(results, scannerContext); - if ((index >> 1) < 5) { - if (index % 2 == 0) { - assertEquals(3, results.size()); + try (InternalScanner s = region.getScanner(scan)) { + // Our data looks like this: + // r0: first:a, first:b, second:a, second:b + // r1: first:a, first:b, second:a, second:b + // r2: first:a, first:b, second:a, second:b + // r3: first:a, first:b, second:a, second:b + // r4: first:a, first:b, second:a, second:b + // r5: first:a + // r6: first:a + // r7: first:a + // r8: first:a + // r9: first:a + + // But due to next's limit set to 3, we should get this: + // r0: first:a, first:b, second:a + // r0: second:b + // r1: first:a, first:b, second:a + // r1: second:b + // r2: first:a, first:b, second:a + // r2: second:b + // r3: first:a, first:b, second:a + // r3: second:b + // r4: first:a, first:b, second:a + // r4: second:b + // r5: first:a + // r6: first:a + // r7: first:a + // r8: first:a + // r9: first:a + + List results = new ArrayList<>(); + int index = 0; + ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build(); + while (true) { + boolean more = s.next(results, scannerContext); + if ((index >> 1) < 5) { + if (index % 2 == 0) { + assertEquals(3, results.size()); + } else { + assertEquals(1, results.size()); + } } else { assertEquals(1, results.size()); } - } else { - assertEquals(1, results.size()); - } - results.clear(); - index++; - if (!more) { - break; + results.clear(); + index++; + if (!more) { + break; + } } } } @@ -4382,15 +4377,15 @@ public boolean isFamilyEssential(byte[] name) { public void testScannerOperationId() throws IOException { region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES); Scan scan = new Scan(); - RegionScanner scanner = region.getScanner(scan); - assertNull(scanner.getOperationId()); - scanner.close(); + try (RegionScanner scanner = region.getScanner(scan)) { + assertNull(scanner.getOperationId()); + } String operationId = "test_operation_id_0101"; scan = new Scan().setId(operationId); - scanner = region.getScanner(scan); - assertEquals(operationId, scanner.getOperationId()); - scanner.close(); + try (RegionScanner scanner = region.getScanner(scan)) { + assertEquals(operationId, scanner.getOperationId()); + } HBaseTestingUtil.closeRegionAndWAL(this.region); } @@ -4458,12 +4453,14 @@ public void testFlushCacheWhileScanning() throws IOException, InterruptedExcepti if (i != 0 && i % flushAndScanInterval == 0) { res.clear(); - InternalScanner scanner = 
region.getScanner(scan); - if (toggle) { - flushThread.flush(); + try (InternalScanner scanner = region.getScanner(scan)) { + if (toggle) { + flushThread.flush(); + } + while (scanner.next(res)) { + // ignore + } } - while (scanner.next(res)) - ; if (!toggle) { flushThread.flush(); } @@ -4883,13 +4880,14 @@ Arrays. asList( new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1, CompareOperator.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L)))))); - InternalScanner scanner = region.getScanner(idxScan); - List res = new ArrayList<>(); + try (InternalScanner scanner = region.getScanner(idxScan)) { + List res = new ArrayList<>(); - while (scanner.next(res)) { - // Ignore res value. + while (scanner.next(res)) { + // Ignore res value. + } + assertEquals(1L, res.size()); } - assertEquals(1L, res.size()); } // //////////////////////////////////////////////////////////////////////////// @@ -5751,8 +5749,7 @@ protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstVa Scan scan = new Scan(); for (int i = 0; i < families.length; i++) scan.addFamily(families[i]); - InternalScanner s = r.getScanner(scan); - try { + try (InternalScanner s = r.getScanner(scan)) { List curVals = new ArrayList<>(); boolean first = true; OUTER_LOOP: while (s.next(curVals)) { @@ -5768,8 +5765,6 @@ protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstVa } } } - } finally { - s.close(); } } @@ -5908,26 +5903,26 @@ public void testReverseScanner_FromMemStore_SingleCF_Normal() throws IOException Scan scan = new Scan().withStartRow(rowC); scan.readVersions(5); scan.setReversed(true); - InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList<>(); - boolean hasNext = scanner.next(currRow); - assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowC, 0, rowC.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowB, 0, rowB.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowA, 0, rowA.length)); - assertFalse(hasNext); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + List currRow = new ArrayList<>(); + boolean hasNext = scanner.next(currRow); + assertEquals(2, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); + assertFalse(hasNext); + } } @Test @@ -5961,25 +5956,25 @@ public void testReverseScanner_FromMemStore_SingleCF_LargerKey() throws IOExcept List currRow = new ArrayList<>(); scan.setReversed(true); 
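
The scanner hunks in this patch all apply the same refactor: wrap region.getScanner(...) in try-with-resources so the scanner is always closed, even when an assertion fails partway through a test. A minimal sketch of the resulting pattern, assuming a populated test region and the usual HBase/JUnit test imports (the start row, version count and expected sizes here are illustrative, not taken from the patch):

    Scan scan = new Scan().withStartRow(startRow);
    scan.setReversed(true);
    scan.readVersions(5);
    List<Cell> results = new ArrayList<>();
    // InternalScanner extends Closeable, so try-with-resources replaces the old
    // explicit scanner.close() calls and also covers early assertion failures.
    try (InternalScanner scanner = region.getScanner(scan)) {
      boolean hasNext = scanner.next(results); // loads the next row's cells into results
      assertTrue(hasNext);
      assertEquals(2, results.size());
    }
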
scan.readVersions(5); - InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(currRow); - assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowC, 0, rowC.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowB, 0, rowB.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowA, 0, rowA.length)); - assertFalse(hasNext); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(2, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); + assertFalse(hasNext); + } } @Test @@ -6010,25 +6005,25 @@ public void testReverseScanner_FromMemStore_SingleCF_FullScan() throws IOExcepti Scan scan = new Scan(); List currRow = new ArrayList<>(); scan.setReversed(true); - InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowC, 0, rowC.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowB, 0, rowB.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowA, 0, rowA.length)); - assertFalse(hasNext); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); + assertFalse(hasNext); + } } @Test @@ -6075,36 +6070,37 @@ 
public void testReverseScanner_moreRowsMayExistAfter() throws IOException { scan.addColumn(families[0], col1); scan.setReversed(true); List currRow = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowD, 0, rowD.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowC, 0, rowC.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowB, 0, rowB.length)); - assertFalse(hasNext); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); + assertFalse(hasNext); + } scan = new Scan().withStartRow(rowD).withStopRow(rowA); scan.addColumn(families[0], col2); scan.setReversed(true); currRow.clear(); - scanner = region.getScanner(scan); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowD, 0, rowD.length)); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(hasNext); + } } @Test @@ -6153,36 +6149,37 @@ public void testReverseScanner_smaller_blocksize() throws IOException { scan.addColumn(families[0], col1); scan.setReversed(true); List currRow = new ArrayList<>(); - InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowD, 0, rowD.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowC, 0, rowC.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowB, 0, rowB.length)); - assertFalse(hasNext); - 
scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); + assertFalse(hasNext); + } scan = new Scan().withStartRow(rowD).withStopRow(rowA); scan.addColumn(families[0], col2); scan.setReversed(true); currRow.clear(); - scanner = region.getScanner(scan); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), rowD, 0, rowD.length)); - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(hasNext); + } } @Test @@ -6274,60 +6271,59 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOExcept scan.readVersions(5); scan.setBatch(3); scan.setReversed(true); - InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList<>(); - boolean hasNext = false; - // 1. scan out "row4" (5 kvs), "row5" can't be scanned out since not - // included in scan range - // "row4" takes 2 next() calls since batch=3 - hasNext = scanner.next(currRow); - assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row4, 0, row4.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row4, 0, row4.length)); - assertTrue(hasNext); - // 2. scan out "row3" (2 kv) - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row3, 0, row3.length)); - assertTrue(hasNext); - // 3. scan out "row2" (4 kvs) - // "row2" takes 2 next() calls since batch=3 - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row2, 0, row2.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row2, 0, row2.length)); - assertTrue(hasNext); - // 4. 
scan out "row1" (2 kv) - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row1, 0, row1.length)); - assertTrue(hasNext); - // 5. scan out "row0" (1 kv) - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row0, 0, row0.length)); - assertFalse(hasNext); - - scanner.close(); + try (InternalScanner scanner = region.getScanner(scan)) { + List currRow = new ArrayList<>(); + boolean hasNext = false; + // 1. scan out "row4" (5 kvs), "row5" can't be scanned out since not + // included in scan range + // "row4" takes 2 next() calls since batch=3 + hasNext = scanner.next(currRow); + assertEquals(3, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(2, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(hasNext); + // 2. scan out "row3" (2 kv) + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(2, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(hasNext); + // 3. scan out "row2" (4 kvs) + // "row2" takes 2 next() calls since batch=3 + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(3, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(hasNext); + // 4. scan out "row1" (2 kv) + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(2, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); + assertTrue(hasNext); + // 5. 
scan out "row0" (1 kv) + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row0, 0, row0.length)); + assertFalse(hasNext); + } } @Test @@ -6374,31 +6370,32 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOExcept Scan scan = new Scan().withStartRow(row4); scan.setReversed(true); scan.setBatch(10); - InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList<>(); - boolean hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row4, 0, row4.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row3, 0, row3.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row2, 0, row2.length)); - assertTrue(hasNext); - currRow.clear(); - hasNext = scanner.next(currRow); - assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row1, 0, row1.length)); - assertFalse(hasNext); + try (InternalScanner scanner = region.getScanner(scan)) { + List currRow = new ArrayList<>(); + boolean hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(hasNext); + currRow.clear(); + hasNext = scanner.next(currRow); + assertEquals(1, currRow.size()); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); + assertFalse(hasNext); + } } /** @@ -6422,35 +6419,35 @@ public void testReverseScanner_StackOverflow() throws IOException { Scan scan = new Scan().withStartRow(Bytes.toBytes("19998")); scan.setReversed(true); - InternalScanner scanner = region.getScanner(scan); - - // create one storefile contains many rows will be skipped - // to check StoreFileScanner.seekToPreviousRow - for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes("" + i)); - p.addColumn(cf1, col, Bytes.toBytes("" + i)); - region.put(p); - } - region.flushcache(true, true, FlushLifeCycleTracker.DUMMY); + try (InternalScanner scanner = region.getScanner(scan)) { + // create one storefile contains many rows will be skipped + // to check StoreFileScanner.seekToPreviousRow + for (int i = 10000; i < 20000; i++) { + Put p = new Put(Bytes.toBytes("" + i)); + p.addColumn(cf1, col, Bytes.toBytes("" 
+ i)); + region.put(p); + } + region.flushcache(true, true, FlushLifeCycleTracker.DUMMY); + + // create one memstore contains many rows will be skipped + // to check MemStoreScanner.seekToPreviousRow + for (int i = 10000; i < 20000; i++) { + Put p = new Put(Bytes.toBytes("" + i)); + p.addColumn(cf1, col, Bytes.toBytes("" + i)); + region.put(p); + } - // create one memstore contains many rows will be skipped - // to check MemStoreScanner.seekToPreviousRow - for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes("" + i)); - p.addColumn(cf1, col, Bytes.toBytes("" + i)); - region.put(p); + List currRow = new ArrayList<>(); + boolean hasNext; + do { + hasNext = scanner.next(currRow); + } while (hasNext); + assertEquals(2, currRow.size()); + assertEquals("19998", Bytes.toString(currRow.get(0).getRowArray(), + currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); + assertEquals("19997", Bytes.toString(currRow.get(1).getRowArray(), + currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); } - - List currRow = new ArrayList<>(); - boolean hasNext; - do { - hasNext = scanner.next(currRow); - } while (hasNext); - assertEquals(2, currRow.size()); - assertEquals("19998", Bytes.toString(currRow.get(0).getRowArray(), - currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); - assertEquals("19997", Bytes.toString(currRow.get(1).getRowArray(), - currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); } @Test @@ -6469,37 +6466,38 @@ public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exceptio // create a reverse scan Scan scan = new Scan().withStartRow(Bytes.toBytes("19996")); scan.setReversed(true); - RegionScannerImpl scanner = region.getScanner(scan); - - // flush the cache. This will reset the store scanner - region.flushcache(true, true, FlushLifeCycleTracker.DUMMY); - - // create one memstore contains many rows will be skipped - // to check MemStoreScanner.seekToPreviousRow - for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes("" + i)); - p.addColumn(cf1, col, Bytes.toBytes("" + i)); - region.put(p); - } - List currRow = new ArrayList<>(); - boolean hasNext; - boolean assertDone = false; - do { - hasNext = scanner.next(currRow); - // With HBASE-15871, after the scanner is reset the memstore scanner should not be - // added here - if (!assertDone) { - StoreScanner current = (StoreScanner) (scanner.storeHeap).getCurrentForTesting(); - List scanners = current.getAllScannersForTesting(); - assertEquals("There should be only one scanner the store file scanner", 1, scanners.size()); - assertDone = true; + try (RegionScannerImpl scanner = region.getScanner(scan)) { + // flush the cache. 
This will reset the store scanner + region.flushcache(true, true, FlushLifeCycleTracker.DUMMY); + + // create one memstore contains many rows will be skipped + // to check MemStoreScanner.seekToPreviousRow + for (int i = 10000; i < 20000; i++) { + Put p = new Put(Bytes.toBytes("" + i)); + p.addColumn(cf1, col, Bytes.toBytes("" + i)); + region.put(p); } - } while (hasNext); - assertEquals(2, currRow.size()); - assertEquals("19996", Bytes.toString(currRow.get(0).getRowArray(), - currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); - assertEquals("19995", Bytes.toString(currRow.get(1).getRowArray(), - currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); + List currRow = new ArrayList<>(); + boolean hasNext; + boolean assertDone = false; + do { + hasNext = scanner.next(currRow); + // With HBASE-15871, after the scanner is reset the memstore scanner should not be + // added here + if (!assertDone) { + StoreScanner current = (StoreScanner) (scanner.storeHeap).getCurrentForTesting(); + List scanners = current.getAllScannersForTesting(); + assertEquals("There should be only one scanner the store file scanner", 1, + scanners.size()); + assertDone = true; + } + } while (hasNext); + assertEquals(2, currRow.size()); + assertEquals("19996", Bytes.toString(currRow.get(0).getRowArray(), + currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); + assertEquals("19995", Bytes.toString(currRow.get(1).getRowArray(), + currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); + } } @Test @@ -6520,25 +6518,25 @@ public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception { // Create a reverse scan Scan scan = new Scan().withStartRow(Bytes.toBytes("199996")); scan.setReversed(true); - RegionScannerImpl scanner = region.getScanner(scan); + try (RegionScannerImpl scanner = region.getScanner(scan)) { + // Put a lot of cells that have sequenceIDs grater than the readPt of the reverse scan + for (int i = 100000; i < 200000; i++) { + Put p = new Put(Bytes.toBytes("" + i)); + p.addColumn(cf1, col, Bytes.toBytes("" + i)); + region.put(p); + } + List currRow = new ArrayList<>(); + boolean hasNext; + do { + hasNext = scanner.next(currRow); + } while (hasNext); - // Put a lot of cells that have sequenceIDs grater than the readPt of the reverse scan - for (int i = 100000; i < 200000; i++) { - Put p = new Put(Bytes.toBytes("" + i)); - p.addColumn(cf1, col, Bytes.toBytes("" + i)); - region.put(p); + assertEquals(2, currRow.size()); + assertEquals("199996", Bytes.toString(currRow.get(0).getRowArray(), + currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); + assertEquals("199995", Bytes.toString(currRow.get(1).getRowArray(), + currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); } - List currRow = new ArrayList<>(); - boolean hasNext; - do { - hasNext = scanner.next(currRow); - } while (hasNext); - - assertEquals(2, currRow.size()); - assertEquals("199996", Bytes.toString(currRow.get(0).getRowArray(), - currRow.get(0).getRowOffset(), currRow.get(0).getRowLength())); - assertEquals("199995", Bytes.toString(currRow.get(1).getRowArray(), - currRow.get(1).getRowOffset(), currRow.get(1).getRowLength())); } @Test @@ -6987,11 +6985,11 @@ private void checkScan(int expectCellSize) throws IOException { Scan s = new Scan().withStartRow(row); ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true); ScannerContext scannerContext = contextBuilder.build(); - RegionScanner scanner = region.getScanner(s); - List kvs = new ArrayList<>(); - 
scanner.next(kvs, scannerContext); - assertEquals(expectCellSize, kvs.size()); - scanner.close(); + try (RegionScanner scanner = region.getScanner(s)) { + List kvs = new ArrayList<>(); + scanner.next(kvs, scannerContext); + assertEquals(expectCellSize, kvs.size()); + } } @Test @@ -7615,7 +7613,7 @@ public void run() { holder.start(); latch.await(); - region.close(); + HBaseTestingUtil.closeRegionAndWAL(region); region = null; holder.join(); From c3ee1dd9ef75b9cb4de7ad6a608eda31e6fb9a37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 25 Feb 2024 17:13:36 +0800 Subject: [PATCH 261/514] HBASE-28402 Bump cryptography in /dev-support/git-jira-release-audit (#5694) Bumps [cryptography](https://github.com/pyca/cryptography) from 42.0.2 to 42.0.4. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/42.0.2...42.0.4) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 5e402a1875f4..3791481daf5e 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2023.7.22 cffi==1.13.2 chardet==3.0.4 -cryptography==42.0.2 +cryptography==42.0.4 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From c4a02f7fcd1d74385b87bb761d25b118ce080119 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 25 Feb 2024 20:50:13 +0800 Subject: [PATCH 262/514] HBASE-28321 RpcConnectionRegistry is broken when security is enabled and we use different principal for master and region server (#5688) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/client/ConnectionUtils.java | 2 +- .../hadoop/hbase/ipc/AbstractRpcClient.java | 7 + .../hbase/ipc/BlockingRpcConnection.java | 104 +++++-- .../org/apache/hadoop/hbase/ipc/Call.java | 8 +- .../org/apache/hadoop/hbase/ipc/IPCUtil.java | 5 + .../hadoop/hbase/ipc/NettyRpcConnection.java | 127 ++++++-- .../hbase/ipc/NettyRpcDuplexHandler.java | 2 +- .../hadoop/hbase/ipc/PreambleCallHandler.java | 95 ++++++ .../apache/hadoop/hbase/ipc/RpcClient.java | 2 + .../hadoop/hbase/ipc/RpcConnection.java | 137 ++++++++- .../ipc/SecurityNotEnabledException.java | 34 +++ .../security/AbstractHBaseSaslRpcClient.java | 36 +-- .../hbase/security/HBaseSaslRpcClient.java | 8 +- .../security/NettyHBaseSaslRpcClient.java | 4 +- .../NettyHBaseSaslRpcClientHandler.java | 4 +- .../hadoop/hbase/security/SecurityInfo.java | 30 +- ...igestSaslClientAuthenticationProvider.java | 7 +- .../GssSaslClientAuthenticationProvider.java | 45 +-- .../SaslClientAuthenticationProvider.java | 31 +- ...impleSaslClientAuthenticationProvider.java | 3 +- .../security/TestHBaseSaslRpcClient.java | 30 +- ...ShadeSaslClientAuthenticationProvider.java | 7 +- .../src/main/protobuf/rpc/RPC.proto | 4 + .../apache/hadoop/hbase/ipc/RpcServer.java | 7 +- .../hadoop/hbase/ipc/ServerRpcConnection.java | 32 +- .../hbase/ipc/SimpleServerRpcConnection.java | 2 +- .../ipc/TestMultipleServerPrincipalsIPC.java | 277 ++++++++++++++++++ .../ipc/TestRpcSkipInitialSaslHandshake.java | 6 +- 
.../ipc/TestSecurityRpcSentBytesMetrics.java | 5 +- .../hbase/security/AbstractTestSecureIPC.java | 12 +- ...tipleServerPrincipalsFallbackToSimple.java | 189 ++++++++++++ .../TestSaslTlsIPCRejectPlainText.java | 5 +- ...tomSaslAuthenticationProviderTestBase.java | 3 +- 33 files changed, 1084 insertions(+), 186 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PreambleCallHandler.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SecurityNotEnabledException.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMultipleServerPrincipalsIPC.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMultipleServerPrincipalsFallbackToSimple.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index d073fef929fd..84acc6e4d398 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -665,7 +665,7 @@ static void setCoprocessorError(RpcController controller, Throwable error) { } } - static boolean isUnexpectedPreambleHeaderException(IOException e) { + public static boolean isUnexpectedPreambleHeaderException(IOException e) { if (!(e instanceof RemoteException)) { return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 7972cc08acd2..3742eb8118a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE; import static org.apache.hadoop.hbase.ipc.IPCUtil.wrapException; +import com.google.errorprone.annotations.RestrictedApi; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.context.Scope; @@ -542,6 +543,12 @@ public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) { return new RpcChannelImplementation(this, createAddr(sn), user, rpcTimeout); } + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + PoolMap getConnections() { + return connections; + } + private static class AbstractRpcChannel { protected final Address addr; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index 3f1418aa9849..4b3d2de466b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -37,12 +37,14 @@ import java.util.ArrayDeque; import java.util.Locale; import java.util.Queue; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ThreadLocalRandom; import javax.security.sasl.SaslException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; 
import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; @@ -352,13 +354,13 @@ private void disposeSasl() { } } - private boolean setupSaslConnection(final InputStream in2, final OutputStream out2) - throws IOException { + private boolean setupSaslConnection(final InputStream in2, final OutputStream out2, + String serverPrincipal) throws IOException { if (this.metrics != null) { this.metrics.incrNsLookups(); } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, + socket.getInetAddress(), serverPrincipal, this.rpcClient.fallbackAllowed, this.rpcClient.conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); @@ -379,7 +381,8 @@ private boolean setupSaslConnection(final InputStream in2, final OutputStream ou *

*/ private void handleSaslConnectionFailure(final int currRetries, final int maxRetries, - final Exception ex, final UserGroupInformation user) throws IOException, InterruptedException { + final Exception ex, final UserGroupInformation user, final String serverPrincipal) + throws IOException, InterruptedException { closeSocket(); user.doAs(new PrivilegedExceptionAction() { @Override @@ -419,17 +422,22 @@ public Object run() throws IOException, InterruptedException { Thread.sleep(ThreadLocalRandom.current().nextInt(reloginMaxBackoff) + 1); return null; } else { - String msg = - "Failed to initiate connection for " + UserGroupInformation.getLoginUser().getUserName() - + " to " + securityInfo.getServerPrincipal(); + String msg = "Failed to initiate connection for " + + UserGroupInformation.getLoginUser().getUserName() + " to " + serverPrincipal; throw new IOException(msg, ex); } } }); } - private void getConnectionRegistry(OutputStream outStream) throws IOException { + private void getConnectionRegistry(InputStream inStream, OutputStream outStream, + Call connectionRegistryCall) throws IOException { outStream.write(RpcClient.REGISTRY_PREAMBLE_HEADER); + readResponse(new DataInputStream(inStream), calls, connectionRegistryCall, remoteExc -> { + synchronized (this) { + closeConn(remoteExc); + } + }); } private void createStreams(InputStream inStream, OutputStream outStream) { @@ -437,7 +445,52 @@ private void createStreams(InputStream inStream, OutputStream outStream) { this.out = new DataOutputStream(new BufferedOutputStream(outStream)); } - private void setupIOstreams() throws IOException { + // choose the server principal to use + private String chooseServerPrincipal(InputStream inStream, OutputStream outStream) + throws IOException { + Set serverPrincipals = getServerPrincipals(); + if (serverPrincipals.size() == 1) { + return serverPrincipals.iterator().next(); + } + // this means we use kerberos authentication and there are multiple server principal candidates, + // in this way we need to send a special preamble header to get server principal from server + Call securityPreambleCall = createSecurityPreambleCall(r -> { + }); + outStream.write(RpcClient.SECURITY_PREAMBLE_HEADER); + readResponse(new DataInputStream(inStream), calls, securityPreambleCall, remoteExc -> { + synchronized (this) { + closeConn(remoteExc); + } + }); + if (securityPreambleCall.error != null) { + LOG.debug("Error when trying to do a security preamble call to {}", remoteId.address, + securityPreambleCall.error); + if (ConnectionUtils.isUnexpectedPreambleHeaderException(securityPreambleCall.error)) { + // this means we are connecting to an old server which does not support the security + // preamble call, so we should fallback to randomly select a principal to use + // TODO: find a way to reconnect without failing all the pending calls, for now, when we + // reach here, shutdown should have already been scheduled + throw securityPreambleCall.error; + } + if (IPCUtil.isSecurityNotEnabledException(securityPreambleCall.error)) { + // server tells us security is not enabled, then we should check whether fallback to + // simple is allowed, if so we just go without security, otherwise we should fail the + // negotiation immediately + if (rpcClient.fallbackAllowed) { + // TODO: just change the preamble and skip the fallback to simple logic, for now, just + // select the first principal can finish the connection setup, but waste one client + // message + return serverPrincipals.iterator().next(); + } else { + throw new 
FallbackDisallowedException(); + } + } + return randomSelect(serverPrincipals); + } + return chooseServerPrincipal(serverPrincipals, securityPreambleCall); + } + + private void setupIOstreams(Call connectionRegistryCall) throws IOException { if (socket != null) { // The connection is already available. Perfect. return; @@ -465,32 +518,37 @@ private void setupIOstreams() throws IOException { // This creates a socket with a write timeout. This timeout cannot be changed. OutputStream outStream = NetUtils.getOutputStream(socket, this.rpcClient.writeTO); if (connectionRegistryCall != null) { - getConnectionRegistry(outStream); - createStreams(inStream, outStream); - break; + getConnectionRegistry(inStream, outStream, connectionRegistryCall); + closeSocket(); + return; } - // Write out the preamble -- MAGIC, version, and auth to use. - writeConnectionHeaderPreamble(outStream); + if (useSasl) { - final InputStream in2 = inStream; - final OutputStream out2 = outStream; UserGroupInformation ticket = provider.getRealUser(remoteId.ticket); boolean continueSasl; if (ticket == null) { throw new FatalConnectionException("ticket/user is null"); } + String serverPrincipal = chooseServerPrincipal(inStream, outStream); + // Write out the preamble -- MAGIC, version, and auth to use. + writeConnectionHeaderPreamble(outStream); try { + final InputStream in2 = inStream; + final OutputStream out2 = outStream; continueSasl = ticket.doAs(new PrivilegedExceptionAction() { @Override public Boolean run() throws IOException { - return setupSaslConnection(in2, out2); + return setupSaslConnection(in2, out2, serverPrincipal); } }); } catch (Exception ex) { ExceptionUtil.rethrowIfInterrupt(ex); - handleSaslConnectionFailure(numRetries++, reloginMaxRetries, ex, ticket); + saslNegotiationDone(serverPrincipal, false); + handleSaslConnectionFailure(numRetries++, reloginMaxRetries, ex, ticket, + serverPrincipal); continue; } + saslNegotiationDone(serverPrincipal, true); if (continueSasl) { // Sasl connect is successful. Let's set up Sasl i/o streams. inStream = saslRpcClient.getInputStream(); @@ -501,6 +559,9 @@ public Boolean run() throws IOException { // reconnecting because regionserver may change its sasl config after restart. saslRpcClient = null; } + } else { + // Write out the preamble -- MAGIC, version, and auth to use. + writeConnectionHeaderPreamble(outStream); } createStreams(inStream, outStream); // Now write out the connection header @@ -618,9 +679,10 @@ private void writeRequest(Call call) throws IOException { } RequestHeader requestHeader = buildRequestHeader(call, cellBlockMeta); if (call.isConnectionRegistryCall()) { - connectionRegistryCall = call; + setupIOstreams(call); + return; } - setupIOstreams(); + setupIOstreams(null); // Now we're going to write the call. We take the lock, then check that the connection // is still valid, and, if so we do the write to the socket. 
If the write fails, we don't @@ -655,7 +717,7 @@ private void writeRequest(Call call) throws IOException { */ private void readResponse() { try { - readResponse(in, calls, remoteExc -> { + readResponse(in, calls, null, remoteExc -> { synchronized (this) { closeConn(remoteExc); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index d175ea0b6e90..980e708d235c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -20,7 +20,6 @@ import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.util.Map; -import java.util.Optional; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.CellScanner; @@ -85,16 +84,15 @@ class Call { * Builds a simplified {@link #toString()} that includes just the id and method name. */ public String toShortString() { + // Call[id=32153218,methodName=Get] return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id) - .append("methodName", md.getName()).toString(); + .append("methodName", md != null ? md.getName() : "").toString(); } @Override public String toString() { - // Call[id=32153218,methodName=Get] return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(toShortString()) - .append("param", Optional.ofNullable(param).map(ProtobufUtil::getShortTextFormat).orElse("")) - .toString(); + .append("param", param != null ? ProtobufUtil.getShortTextFormat(param) : "").toString(); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index bf4b833e856c..42094eb45e09 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -178,6 +178,11 @@ static boolean isFatalConnectionException(ExceptionResponse e) { } } + static boolean isSecurityNotEnabledException(IOException e) { + return e instanceof RemoteException + && SecurityNotEnabledException.class.getName().equals(((RemoteException) e).getClassName()); + } + static IOException toIOE(Throwable t) { if (t instanceof IOException) { return (IOException) t; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index a0f8f10d1cf9..1618709fa9bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -26,10 +26,13 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.Set; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; +import javax.security.sasl.SaslException; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.io.crypto.tls.X509Util; import org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallEvent; import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; @@ -157,7 +160,7 @@ public void cleanupConnection() { }); } - private void established(Channel ch) throws IOException { + private void 
established(Channel ch) { assert eventLoop.inEventLoop(); ch.pipeline() .addBefore(BufferCallBeforeInitHandler.NAME, null, @@ -169,6 +172,11 @@ private void established(Channel ch) throws IOException { .fireUserEventTriggered(BufferCallEvent.success()); } + private void saslEstablished(Channel ch, String serverPrincipal) { + saslNegotiationDone(serverPrincipal, true); + established(ch); + } + private boolean reloginInProgress; private void scheduleRelogin(Throwable error) { @@ -201,23 +209,31 @@ private void failInit(Channel ch, IOException e) { // fail all pending calls ch.pipeline().fireUserEventTriggered(BufferCallEvent.fail(e)); shutdown0(); + rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), e); + } + + private void saslFailInit(Channel ch, String serverPrincipal, IOException error) { + assert eventLoop.inEventLoop(); + saslNegotiationDone(serverPrincipal, false); + failInit(ch, error); } - private void saslNegotiate(final Channel ch) { + private void saslNegotiate(Channel ch, String serverPrincipal) { assert eventLoop.inEventLoop(); + NettyFutureUtils.safeWriteAndFlush(ch, connectionHeaderPreamble.retainedDuplicate()); UserGroupInformation ticket = provider.getRealUser(remoteId.getTicket()); if (ticket == null) { - failInit(ch, new FatalConnectionException("ticket/user is null")); + saslFailInit(ch, serverPrincipal, new FatalConnectionException("ticket/user is null")); return; } Promise saslPromise = ch.eventLoop().newPromise(); final NettyHBaseSaslRpcClientHandler saslHandler; try { saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, provider, token, - ((InetSocketAddress) ch.remoteAddress()).getAddress(), securityInfo, + ((InetSocketAddress) ch.remoteAddress()).getAddress(), serverPrincipal, rpcClient.fallbackAllowed, this.rpcClient.conf); } catch (IOException e) { - failInit(ch, e); + saslFailInit(ch, serverPrincipal, e); return; } ch.pipeline().addBefore(BufferCallBeforeInitHandler.NAME, null, new SaslChallengeDecoder()) @@ -252,35 +268,99 @@ public void operationComplete(Future future) throws Exception { p.remove(NettyHBaseRpcConnectionHeaderHandler.class); // don't send connection header, NettyHBaseRpcConnectionHeaderHandler // sent it already - established(ch); + saslEstablished(ch, serverPrincipal); } else { final Throwable error = future.cause(); scheduleRelogin(error); - failInit(ch, toIOE(error)); + saslFailInit(ch, serverPrincipal, toIOE(error)); } } }); } else { // send the connection header to server ch.write(connectionHeaderWithLength.retainedDuplicate()); - established(ch); + saslEstablished(ch, serverPrincipal); } } else { final Throwable error = future.cause(); scheduleRelogin(error); - failInit(ch, toIOE(error)); + saslFailInit(ch, serverPrincipal, toIOE(error)); } } }); } - private void getConnectionRegistry(Channel ch) throws IOException { - established(ch); - NettyFutureUtils.safeWriteAndFlush(ch, - Unpooled.directBuffer(6).writeBytes(RpcClient.REGISTRY_PREAMBLE_HEADER)); + private void getConnectionRegistry(Channel ch, Call connectionRegistryCall) throws IOException { + assert eventLoop.inEventLoop(); + PreambleCallHandler.setup(ch.pipeline(), rpcClient.readTO, this, + RpcClient.REGISTRY_PREAMBLE_HEADER, connectionRegistryCall); } - private void connect() throws UnknownHostException { + private void onSecurityPreambleError(Channel ch, Set serverPrincipals, + IOException error) { + assert eventLoop.inEventLoop(); + LOG.debug("Error when trying to do a security preamble call to {}", remoteId.address, error); + if 
(ConnectionUtils.isUnexpectedPreambleHeaderException(error)) { + // this means we are connecting to an old server which does not support the security + // preamble call, so we should fallback to randomly select a principal to use + // TODO: find a way to reconnect without failing all the pending calls, for now, when we + // reach here, shutdown should have already been scheduled + return; + } + if (IPCUtil.isSecurityNotEnabledException(error)) { + // server tells us security is not enabled, then we should check whether fallback to + // simple is allowed, if so we just go without security, otherwise we should fail the + // negotiation immediately + if (rpcClient.fallbackAllowed) { + // TODO: just change the preamble and skip the fallback to simple logic, for now, just + // select the first principal can finish the connection setup, but waste one client + // message + saslNegotiate(ch, serverPrincipals.iterator().next()); + } else { + failInit(ch, new FallbackDisallowedException()); + } + return; + } + // usually we should not reach here, but for robust, just randomly select a principal to + // connect + saslNegotiate(ch, randomSelect(serverPrincipals)); + } + + private void onSecurityPreambleFinish(Channel ch, Set serverPrincipals, + Call securityPreambleCall) { + assert eventLoop.inEventLoop(); + String serverPrincipal; + try { + serverPrincipal = chooseServerPrincipal(serverPrincipals, securityPreambleCall); + } catch (SaslException e) { + failInit(ch, e); + return; + } + saslNegotiate(ch, serverPrincipal); + } + + private void saslNegotiate(Channel ch) throws IOException { + assert eventLoop.inEventLoop(); + Set serverPrincipals = getServerPrincipals(); + if (serverPrincipals.size() == 1) { + saslNegotiate(ch, serverPrincipals.iterator().next()); + return; + } + // this means we use kerberos authentication and there are multiple server principal candidates, + // in this way we need to send a special preamble header to get server principal from server + Call securityPreambleCall = createSecurityPreambleCall(call -> { + assert eventLoop.inEventLoop(); + if (call.error != null) { + onSecurityPreambleError(ch, serverPrincipals, call.error); + } else { + onSecurityPreambleFinish(ch, serverPrincipals, call); + } + }); + PreambleCallHandler.setup(ch.pipeline(), rpcClient.readTO, this, + RpcClient.SECURITY_PREAMBLE_HEADER, securityPreambleCall); + } + + private void connect(Call connectionRegistryCall) throws UnknownHostException { assert eventLoop.inEventLoop(); LOG.trace("Connecting to {}", remoteId.getAddress()); InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); @@ -310,16 +390,17 @@ protected void initChannel(Channel ch) throws Exception { private void succeed(Channel ch) throws IOException { if (connectionRegistryCall != null) { - getConnectionRegistry(ch); + getConnectionRegistry(ch, connectionRegistryCall); return; } - NettyFutureUtils.safeWriteAndFlush(ch, connectionHeaderPreamble.retainedDuplicate()); - if (useSasl) { - saslNegotiate(ch); - } else { - // send the connection header to server + if (!useSasl) { + // BufferCallBeforeInitHandler will call ctx.flush when receiving the + // BufferCallEvent.success() event, so here we just use write for the below two messages + NettyFutureUtils.safeWrite(ch, connectionHeaderPreamble.retainedDuplicate()); NettyFutureUtils.safeWrite(ch, connectionHeaderWithLength.retainedDuplicate()); established(ch); + } else { + saslNegotiate(ch); } } @@ -331,7 +412,6 @@ private void fail(Channel ch, Throwable error) { 
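
In the HBASE-28321 change above, when several candidate Kerberos server principals are configured (for example separate master and region server principals), the client first asks the server which principal it runs with via the new security preamble call, and only selects a principal on its own when the server cannot answer. A rough sketch of that blind selection, in the spirit of the randomSelect(serverPrincipals) call above (illustrative only, not the actual RpcConnection implementation):

    import java.util.Iterator;
    import java.util.Set;
    import java.util.concurrent.ThreadLocalRandom;

    final class PrincipalFallback {
      // Pick one candidate principal at random; only used when the server cannot
      // tell the client which principal it actually runs with.
      static String randomSelect(Set<String> candidates) {
        int skip = ThreadLocalRandom.current().nextInt(candidates.size());
        Iterator<String> it = candidates.iterator();
        for (int i = 0; i < skip; i++) {
          it.next();
        }
        return it.next();
      }
    }
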
connectionRegistryCall.setException(ex); } failInit(ch, ex); - rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), error); } @Override @@ -360,10 +440,9 @@ public void operationComplete(ChannelFuture future) throws Exception { private void sendRequest0(Call call, HBaseRpcController hrc) throws IOException { assert eventLoop.inEventLoop(); if (call.isConnectionRegistryCall()) { - connectionRegistryCall = call; // For get connection registry call, we will send a special preamble header to get the // response, instead of sending a real rpc call. See HBASE-25051 - connect(); + connect(call); return; } if (reloginInProgress) { @@ -386,7 +465,7 @@ public void run(boolean cancelled) throws IOException { setCancelled(call); } else { if (channel == null) { - connect(); + connect(null); } scheduleTimeoutTask(call); channel.writeAndFlush(call).addListener(new ChannelFutureListener() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 44772ae2dbf9..ef3752bbf47c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -122,7 +122,7 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOException { try { - conn.readResponse(new ByteBufInputStream(buf), id2Call, + conn.readResponse(new ByteBufInputStream(buf), id2Call, null, remoteExc -> exceptionCaught(ctx, remoteExc)); } catch (IOException e) { // In netty, the decoding the frame based, when reaching here we have already read a full diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PreambleCallHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PreambleCallHandler.java new file mode 100644 index 000000000000..1b2a7cf6acee --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PreambleCallHandler.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import java.util.HashMap; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.util.NettyFutureUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; +import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; +import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import org.apache.hbase.thirdparty.io.netty.handler.timeout.ReadTimeoutHandler; + +/** + * Used to decode preamble calls. + */ +@InterfaceAudience.Private +class PreambleCallHandler extends SimpleChannelInboundHandler { + + private final NettyRpcConnection conn; + + private final byte[] preambleHeader; + + private final Call preambleCall; + + PreambleCallHandler(NettyRpcConnection conn, byte[] preambleHeader, Call preambleCall) { + this.conn = conn; + this.preambleHeader = preambleHeader; + this.preambleCall = preambleCall; + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + NettyFutureUtils.safeWriteAndFlush(ctx, + Unpooled.directBuffer(preambleHeader.length).writeBytes(preambleHeader)); + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, ByteBuf buf) throws Exception { + try { + conn.readResponse(new ByteBufInputStream(buf), new HashMap<>(), preambleCall, + remoteExc -> exceptionCaught(ctx, remoteExc)); + } finally { + ChannelPipeline p = ctx.pipeline(); + p.remove("PreambleCallReadTimeoutHandler"); + p.remove("PreambleCallFrameDecoder"); + p.remove(this); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + preambleCall.setException(new ConnectionClosedException("Connection closed")); + ctx.fireChannelInactive(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + preambleCall.setException(IPCUtil.toIOE(cause)); + } + + public static void setup(ChannelPipeline pipeline, int readTimeoutMs, NettyRpcConnection conn, + byte[] preambleHeader, Call preambleCall) { + // we do not use single decode here, as for a preamble call, we do not expect the server side + // will return multiple responses + pipeline + .addBefore(BufferCallBeforeInitHandler.NAME, "PreambleCallReadTimeoutHandler", + new ReadTimeoutHandler(readTimeoutMs, TimeUnit.MILLISECONDS)) + .addBefore(BufferCallBeforeInitHandler.NAME, "PreambleCallFrameDecoder", + new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4)) + .addBefore(BufferCallBeforeInitHandler.NAME, "PreambleCallHandler", + new PreambleCallHandler(conn, preambleHeader, preambleCall)); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 369430e337ae..7011dc5e1397 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -56,6 +56,8 @@ public interface RpcClient extends Closeable { byte[] REGISTRY_PREAMBLE_HEADER = new byte[] { 'R', 'e', 'g', 'i', 's', 't' }; + byte[] SECURITY_PREAMBLE_HEADER = new byte[] { 'S', 
'e', 'c', 'u', 'r', 'i' }; + /** * Creates a "channel" that can be used by a blocking protobuf service. Useful setting up protobuf * blocking stubs. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 65f936d6fc38..8017e99ec4ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -21,17 +21,26 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.Collection; +import java.util.Collections; import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import javax.security.sasl.SaslException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.security.AuthMethod; +import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; @@ -40,6 +49,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.yetus.audience.InterfaceAudience; @@ -47,6 +57,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; @@ -58,6 +69,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.SecurityPreamableResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; /** @@ -100,6 +112,12 @@ abstract class RpcConnection { protected SaslClientAuthenticationProvider provider; + // Record the server principal which we have successfully authenticated with the remote server + // this is used to save the extra round trip with server when there are multiple candidate server + // principals for a given rpc service, like ClientMetaService. + // See HBASE-28321 for more details. 
+ private String lastSucceededServerPrincipal; + protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, ConnectionId remoteId, String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, CellBlockBuilder cellBlockBuilder, MetricsConnection metrics, @@ -221,6 +239,96 @@ protected final InetSocketAddress getRemoteInetAddress(MetricsConnection metrics return remoteAddr; } + private static boolean useCanonicalHostname(Configuration conf) { + return !conf.getBoolean( + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS, + SecurityConstants.DEFAULT_UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS); + } + + private static String getHostnameForServerPrincipal(Configuration conf, InetAddress addr) { + final String hostname; + if (useCanonicalHostname(conf)) { + hostname = addr.getCanonicalHostName(); + if (hostname.equals(addr.getHostAddress())) { + LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname + + ", " + addr.getHostName() + ". Check DNS configuration or consider " + + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); + } + } else { + hostname = addr.getHostName(); + } + + return hostname.toLowerCase(); + } + + private static String getServerPrincipal(Configuration conf, String serverKey, InetAddress server) + throws IOException { + String hostname = getHostnameForServerPrincipal(conf, server); + return SecurityUtil.getServerPrincipal(conf.get(serverKey), hostname); + } + + protected final boolean isKerberosAuth() { + return provider.getSaslAuthMethod().getCode() == AuthMethod.KERBEROS.code; + } + + protected final Set getServerPrincipals() throws IOException { + // for authentication method other than kerberos, we do not need to know the server principal + if (!isKerberosAuth()) { + return Collections.singleton(HConstants.EMPTY_STRING); + } + // if we have successfully authenticated last time, just return the server principal we use last + // time + if (lastSucceededServerPrincipal != null) { + return Collections.singleton(lastSucceededServerPrincipal); + } + InetAddress server = + new InetSocketAddress(remoteId.address.getHostName(), remoteId.address.getPort()) + .getAddress(); + // Even if we have multiple config key in security info, it is still possible that we configured + // the same principal for them, so here we use a Set + Set serverPrincipals = new TreeSet<>(); + for (String serverPrincipalKey : securityInfo.getServerPrincipals()) { + serverPrincipals.add(getServerPrincipal(conf, serverPrincipalKey, server)); + } + return serverPrincipals; + } + + protected final T randomSelect(Collection c) { + int select = ThreadLocalRandom.current().nextInt(c.size()); + int index = 0; + for (T t : c) { + if (index == select) { + return t; + } + index++; + } + return null; + } + + protected final String chooseServerPrincipal(Set candidates, Call securityPreambleCall) + throws SaslException { + String principal = + ((SecurityPreamableResponse) securityPreambleCall.response).getServerPrincipal(); + if (!candidates.contains(principal)) { + // this means the server returns principal which is not in our candidates, it could be a + // malicious server, stop connecting + throw new SaslException(remoteId.address + " tells us to use server principal " + principal + + " which is not expected, should be one of " + candidates); + } + return principal; + } + + protected final void saslNegotiationDone(String serverPrincipal, boolean succeed) { + 
LOG.debug("sasl negotiation done with serverPrincipal = {}, succeed = {}", serverPrincipal, + succeed); + if (succeed) { + this.lastSucceededServerPrincipal = serverPrincipal; + } else { + // clear the recorded principal if authentication failed + this.lastSucceededServerPrincipal = null; + } + } + protected abstract void callTimeout(Call call); public ConnectionId remoteId() { @@ -252,7 +360,10 @@ public void setLastTouched(long lastTouched) { */ public abstract void cleanupConnection(); - protected Call connectionRegistryCall; + protected final Call createSecurityPreambleCall(RpcCallback callback) { + return new Call(-1, null, null, null, SecurityPreamableResponse.getDefaultInstance(), 0, 0, + Collections.emptyMap(), callback, MetricsConnection.newCallStats()); + } private void finishCall(ResponseHeader responseHeader, T in, Call call) throws IOException { @@ -286,7 +397,7 @@ private void finishCall(ResponseHeader respo } void readResponse(T in, Map id2Call, - Consumer fatalConnectionErrorConsumer) throws IOException { + Call preambleCall, Consumer fatalConnectionErrorConsumer) throws IOException { int totalSize = in.readInt(); ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); int id = responseHeader.getCallId(); @@ -301,9 +412,8 @@ void readResponse(T in, Map i if (IPCUtil.isFatalConnectionException(exceptionResponse)) { // Here we will cleanup all calls so do not need to fall back, just return. fatalConnectionErrorConsumer.accept(remoteExc); - if (connectionRegistryCall != null) { - connectionRegistryCall.setException(remoteExc); - connectionRegistryCall = null; + if (preambleCall != null) { + preambleCall.setException(remoteExc); } return; } @@ -311,10 +421,19 @@ void readResponse(T in, Map i remoteExc = null; } if (id < 0) { - if (connectionRegistryCall != null) { - LOG.debug("process connection registry call"); - finishCall(responseHeader, in, connectionRegistryCall); - connectionRegistryCall = null; + LOG.debug("process preamble call response with response type {}", + preambleCall != null + ? preambleCall.responseDefaultType.getDescriptorForType().getName() + : "null"); + if (preambleCall == null) { + // fall through so later we will skip this response + LOG.warn("Got a negative call id {} but there is no preamble call", id); + } else { + if (remoteExc != null) { + preambleCall.setException(remoteExc); + } else { + finishCall(responseHeader, in, preambleCall); + } return; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SecurityNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SecurityNotEnabledException.java new file mode 100644 index 000000000000..207188de8c6e --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SecurityNotEnabledException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Will be thrown when server received a security preamble call for asking the server principal but + * security is not enabled for this server. + *

+ * This exception will not be thrown to upper layer so mark it as IA.Private. + */ +@InterfaceAudience.Private +public class SecurityNotEnabledException extends HBaseIOException { + + private static final long serialVersionUID = -3682812966232247662L; + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java index 87b2287a6014..4e6f2eab4781 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java @@ -45,38 +45,38 @@ public abstract class AbstractHBaseSaslRpcClient { /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf the configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service - * @param fallbackAllowed does the client allow fallback to simple authentication + * @param conf the configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param servicePrincipal the service principal to use if needed by the authentication method + * @param fallbackAllowed does the client allow fallback to simple authentication */ protected AbstractHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) throws IOException { - this(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, "authentication"); + InetAddress serverAddr, String servicePrincipal, boolean fallbackAllowed) throws IOException { + this(conf, provider, token, serverAddr, servicePrincipal, fallbackAllowed, "authentication"); } /** * Create a HBaseSaslRpcClient for an authentication method - * @param conf configuration object - * @param provider the authentication provider - * @param token token to use if needed by the authentication method - * @param serverAddr the address of the hbase service - * @param securityInfo the security details for the remote hbase service - * @param fallbackAllowed does the client allow fallback to simple authentication - * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") + * @param conf configuration object + * @param provider the authentication provider + * @param token token to use if needed by the authentication method + * @param serverAddr the address of the hbase service + * @param servicePrincipal the service principal to use if needed by the authentication method + * @param fallbackAllowed does the client allow fallback to simple authentication + * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") */ protected AbstractHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, - String rpcProtection) throws IOException { + InetAddress serverAddr, String servicePrincipal, boolean fallbackAllowed, String rpcProtection) + throws IOException { this.fallbackAllowed = fallbackAllowed; saslProps = SaslUtil.initSaslProperties(rpcProtection); 
saslClient = - provider.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + provider.createClient(conf, serverAddr, servicePrincipal, token, fallbackAllowed, saslProps); if (saslClient == null) { throw new IOException( "Authentication provider " + provider.getClass() + " returned a null SaslClient"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index ace1c38ab22a..ebf0a7f875fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -63,15 +63,15 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private boolean initStreamForCrypto; public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, + Token token, InetAddress serverAddr, String servicePrincipal, boolean fallbackAllowed) throws IOException { - super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed); + super(conf, provider, token, serverAddr, servicePrincipal, fallbackAllowed); } public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, + Token token, InetAddress serverAddr, String servicePrincipal, boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) throws IOException { - super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); + super(conf, provider, token, serverAddr, servicePrincipal, fallbackAllowed, rpcProtection); this.initStreamForCrypto = initStreamForCrypto; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java index fe5481a10b25..47d380d71046 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java @@ -40,9 +40,9 @@ public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(NettyHBaseSaslRpcClient.class); public NettyHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, - Token token, InetAddress serverAddr, SecurityInfo securityInfo, + Token token, InetAddress serverAddr, String serverPrincipal, boolean fallbackAllowed, String rpcProtection) throws IOException { - super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); + super(conf, provider, token, serverAddr, serverPrincipal, fallbackAllowed, rpcProtection); } public void setupSaslHandler(ChannelPipeline p, String addAfter) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index cc71355d4297..567b5675b710 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -68,14 +68,14 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler< */ public 
NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInformation ugi, SaslClientAuthenticationProvider provider, Token token, - InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, Configuration conf) + InetAddress serverAddr, String serverPrincipal, boolean fallbackAllowed, Configuration conf) throws IOException { this.saslPromise = saslPromise; this.ugi = ugi; this.conf = conf; this.provider = provider; this.saslRpcClient = new NettyHBaseSaslRpcClient(conf, provider, token, serverAddr, - securityInfo, fallbackAllowed, conf.get("hbase.rpc.protection", + serverPrincipal, fallbackAllowed, conf.get("hbase.rpc.protection", SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index 2e16d5646953..a33f49573dee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.hbase.security; +import java.util.Arrays; +import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; import org.apache.hadoop.hbase.shaded.protobuf.generated.BootstrapNodeProtos; @@ -51,7 +55,8 @@ public class SecurityInfo { infos.put(MasterProtos.HbckService.getDescriptor().getName(), new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(Kind.HBASE_AUTH_TOKEN, SecurityConstants.MASTER_KRB_PRINCIPAL, + SecurityConstants.REGIONSERVER_KRB_PRINCIPAL)); infos.put(BootstrapNodeProtos.BootstrapNodeService.getDescriptor().getName(), new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(LockServiceProtos.LockService.getDescriptor().getName(), @@ -75,16 +80,33 @@ public static SecurityInfo getInfo(String serviceName) { return infos.get(serviceName); } - private final String serverPrincipal; + private final List serverPrincipals; private final Kind tokenKind; public SecurityInfo(String serverPrincipal, Kind tokenKind) { - this.serverPrincipal = serverPrincipal; + this(tokenKind, serverPrincipal); + } + + public SecurityInfo(Kind tokenKind, String... serverPrincipal) { + Preconditions.checkArgument(serverPrincipal.length > 0); this.tokenKind = tokenKind; + this.serverPrincipals = Arrays.asList(serverPrincipal); } + /** + * Although this class is IA.Private, we leak this class in + * {@code SaslClientAuthenticationProvider}, so need to align with the deprecation cycle for that + * class. + * @deprecated Since 2.5.8 and 2.6.0, will be removed in 4.0.0. Use {@link #getServerPrincipals()} + * instead. 
+ */ + @Deprecated public String getServerPrincipal() { - return serverPrincipal; + return serverPrincipals.get(0); + } + + public List getServerPrincipals() { + return serverPrincipals; } public Kind getTokenKind() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java index 480e724599bd..65893c1a75ca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java @@ -31,7 +31,6 @@ import javax.security.sasl.SaslClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; -import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -46,9 +45,9 @@ public class DigestSaslClientAuthenticationProvider extends DigestSaslAuthentica implements SaslClientAuthenticationProvider { @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + public SaslClient createClient(Configuration conf, InetAddress serverAddr, String serverPrincipal, + Token token, boolean fallbackAllowed, Map saslProps) + throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java index 218fd13b60c1..77e92b35bd8c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java @@ -24,10 +24,7 @@ import javax.security.sasl.SaslClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; -import org.apache.hadoop.hbase.security.SecurityConstants; -import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -43,46 +40,10 @@ public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationPr private static final Logger LOG = LoggerFactory.getLogger(GssSaslClientAuthenticationProvider.class); - private static boolean useCanonicalHostname(Configuration conf) { - return !conf.getBoolean( - SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS, - SecurityConstants.DEFAULT_UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS); - } - - public static String getHostnameForServerPrincipal(Configuration conf, InetAddress addr) { - final String hostname; - - if (useCanonicalHostname(conf)) { - hostname = addr.getCanonicalHostName(); - if (hostname.equals(addr.getHostAddress())) { - 
LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname - + ", " + addr.getHostName() + ". Check DNS configuration or consider " - + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); - } - } else { - hostname = addr.getHostName(); - } - - return hostname.toLowerCase(); - } - - String getServerPrincipal(Configuration conf, SecurityInfo securityInfo, InetAddress server) - throws IOException { - String hostname = getHostnameForServerPrincipal(conf, server); - - String serverKey = securityInfo.getServerPrincipal(); - if (serverKey == null) { - throw new IllegalArgumentException( - "Can't obtain server Kerberos config key from SecurityInfo"); - } - return SecurityUtil.getServerPrincipal(conf.get(serverKey), hostname); - } - @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { - String serverPrincipal = getServerPrincipal(conf, securityInfo, serverAddr); + public SaslClient createClient(Configuration conf, InetAddress serverAddr, String serverPrincipal, + Token token, boolean fallbackAllowed, Map saslProps) + throws IOException { LOG.debug("Setting up Kerberos RPC to server={}", serverPrincipal); String[] names = SaslUtil.splitKerberosName(serverPrincipal); if (names.length != 3) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index bbc5ddac91aa..4e23247ca764 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -31,6 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; /** @@ -45,11 +46,33 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProvider { /** - * Creates the SASL client instance for this auth'n method. + * Creates the SASL client instance for this authentication method. + * @deprecated Since 2.5.8 and 2.6.0. In our own code will not call this method any more, + * customized authentication method should implement + * {@link #createClient(Configuration, InetAddress, String, Token, boolean, Map)} + * instead. Will be removed in 4.0.0. */ - SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, Map saslProps) - throws IOException; + @Deprecated + default SaslClient createClient(Configuration conf, InetAddress serverAddr, + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { + throw new UnsupportedOperationException("should not be used any more"); + } + + /** + * Create the SASL client instance for this authentication method. + *

+ * The default implementation is create a fake {@link SecurityInfo} and call the above method, for + * keeping compatible with old customized authentication method + */ + default SaslClient createClient(Configuration conf, InetAddress serverAddr, + String serverPrincipal, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { + String principalKey = "hbase.fake.kerberos.principal"; + conf.set(principalKey, serverPrincipal); + return createClient(conf, serverAddr, new SecurityInfo(principalKey, Kind.HBASE_AUTH_TOKEN), + token, fallbackAllowed, saslProps); + } /** * Constructs a {@link UserInformation} from the given {@link UserGroupInformation} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java index 6fff703689c9..70e469003c87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java @@ -22,7 +22,6 @@ import java.util.Map; import javax.security.sasl.SaslClient; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -37,7 +36,7 @@ public class SimpleSaslClientAuthenticationProvider extends SimpleSaslAuthentica @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + String serverPrincipal, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return null; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 7b42ba224fac..6b1e7c338329 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -53,10 +53,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,9 +78,6 @@ public class TestHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseSaslRpcClient.class); - @Rule - public ExpectedException exception = ExpectedException.none(); - @Test public void testSaslClientUsesGivenRpcProtection() throws Exception { Token token = @@ -90,8 +85,7 @@ public void testSaslClientUsesGivenRpcProtection() throws Exception { DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider(); for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) { String negotiatedQop = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, token, - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false, qop.name(), - false) { + Mockito.mock(InetAddress.class), "", false, qop.name(), false) { public String getQop() { return 
saslProps.get(Sasl.QOP); } @@ -192,14 +186,14 @@ private boolean assertIOExceptionWhenGetStreamsBeforeConnectCall(String principa DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + String serverPrincipal, Token token, boolean fallbackAllowed, Map saslProps) { return Mockito.mock(SaslClient.class); } }; HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), "", + false); try { rpcClient.getInputStream(); @@ -224,14 +218,14 @@ private boolean assertIOExceptionThenSaslClientIsNull(String principal, String p new DigestSaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, Token token, - boolean fallbackAllowed, Map saslProps) { + String serverPrincipal, Token token, boolean fallbackAllowed, + Map saslProps) { return null; } }; new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), "", + false); return false; } catch (IOException ex) { return true; @@ -254,8 +248,8 @@ private boolean assertSuccessCreationDigestPrincipal(String principal, String pa try { rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), new DigestSaslClientAuthenticationProvider(), - createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), "", + false); } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } @@ -275,7 +269,7 @@ private boolean assertSuccessCreationSimple() { private HBaseSaslRpcClient createSaslRpcClientForKerberos() throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), new GssSaslClientAuthenticationProvider(), createTokenMock(), Mockito.mock(InetAddress.class), - Mockito.mock(SecurityInfo.class), false); + "", false); } private Token createTokenMockWithCredentials(String principal, @@ -291,7 +285,7 @@ private Token createTokenMockWithCredentials(String p private HBaseSaslRpcClient createSaslRpcClientSimple() throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), new SimpleSaslClientAuthenticationProvider(), createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + Mockito.mock(InetAddress.class), "", false); } @SuppressWarnings("unchecked") diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/security/provider/example/ShadeSaslClientAuthenticationProvider.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/security/provider/example/ShadeSaslClientAuthenticationProvider.java index d0930a0f3148..3b83d7dda637 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/security/provider/example/ShadeSaslClientAuthenticationProvider.java +++ 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/security/provider/example/ShadeSaslClientAuthenticationProvider.java @@ -31,7 +31,6 @@ import javax.security.sasl.SaslClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; -import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.hbase.util.Bytes; @@ -46,9 +45,9 @@ public class ShadeSaslClientAuthenticationProvider extends ShadeSaslAuthenticati implements SaslClientAuthenticationProvider { @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + public SaslClient createClient(Configuration conf, InetAddress serverAddr, String serverPrincipal, + Token token, boolean fallbackAllowed, Map saslProps) + throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new ShadeSaslClientCallbackHandler(token)); } diff --git a/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto b/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto index e992e681fbff..3e44f8e16fa6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto +++ b/hbase-protocol-shaded/src/main/protobuf/rpc/RPC.proto @@ -159,3 +159,7 @@ message ResponseHeader { // If present, then an encoded data block follows. optional CellBlockMeta cell_block_meta = 3; } + +message SecurityPreamableResponse { + required string server_principal = 1; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 0876a1fd55f4..a84d132a0132 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -67,6 +67,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -117,6 +118,7 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse LoggerFactory.getLogger("SecurityLogger." 
+ Server.class.getName()); protected SecretManager secretManager; protected final Map saslProps; + protected final String serverPrincipal; protected ServiceAuthorizationManager authManager; @@ -211,7 +213,7 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse protected final RpcScheduler scheduler; - protected UserProvider userProvider; + protected final UserProvider userProvider; protected final ByteBuffAllocator bbAllocator; @@ -300,8 +302,11 @@ public RpcServer(final Server server, final String name, if (isSecurityEnabled) { saslProps = SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT))); + serverPrincipal = Preconditions.checkNotNull(userProvider.getCurrentUserName(), + "can not get current user name when security is enabled"); } else { saslProps = Collections.emptyMap(); + serverPrincipal = HConstants.EMPTY_STRING; } this.isOnlineLogProviderEnabled = getIsOnlineLogProviderEnabled(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index be97ad582c37..31f46f30c382 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.SecurityPreamableResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetConnectionRegistryResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo; @@ -695,6 +696,13 @@ private void doBadPreambleHandling(String msg, Exception e) throws IOException { doRespond(getErrorResponse(msg, e)); } + private void doPreambleResponse(Message resp) throws IOException { + ResponseHeader header = ResponseHeader.newBuilder().setCallId(-1).build(); + ByteBuffer buf = ServerCall.createHeaderAndMessageBytes(resp, header, 0, null); + BufferChain bufChain = new BufferChain(buf); + doRespond(() -> bufChain); + } + private boolean doConnectionRegistryResponse() throws IOException { if (!(rpcServer.server instanceof ConnectionRegistryEndpoint)) { // should be in tests or some scenarios where we should not reach here @@ -710,13 +718,22 @@ private boolean doConnectionRegistryResponse() throws IOException { } GetConnectionRegistryResponse resp = GetConnectionRegistryResponse.newBuilder().setClusterId(clusterId).build(); - ResponseHeader header = ResponseHeader.newBuilder().setCallId(-1).build(); - ByteBuffer buf = ServerCall.createHeaderAndMessageBytes(resp, header, 0, null); - BufferChain bufChain = new BufferChain(buf); - doRespond(() -> bufChain); + doPreambleResponse(resp); return true; } + private void doSecurityPreambleResponse() throws IOException { + if (rpcServer.isSecurityEnabled) { + SecurityPreamableResponse resp = SecurityPreamableResponse.newBuilder() + .setServerPrincipal(rpcServer.serverPrincipal).build(); + doPreambleResponse(resp); + } else { + // security is not enabled, do not need a principal when connecting, throw a special exception + // to let 
client know it should just use simple authentication + doRespond(getErrorResponse("security is not enabled", new SecurityNotEnabledException())); + } + } + protected final void callCleanupIfNeeded() { if (callCleanup != null) { callCleanup.run(); @@ -738,6 +755,13 @@ protected final PreambleResponse processPreamble(ByteBuffer preambleBuffer) thro ) { return PreambleResponse.CLOSE; } + if ( + ByteBufferUtils.equals(preambleBuffer, preambleBuffer.position(), 6, + RpcClient.SECURITY_PREAMBLE_HEADER, 0, 6) + ) { + doSecurityPreambleResponse(); + return PreambleResponse.CONTINUE; + } if (!ByteBufferUtils.equals(preambleBuffer, preambleBuffer.position(), 4, RPC_HEADER, 0, 4)) { doBadPreambleHandling( "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 9e90a7a31339..1b28c19b4306 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -145,7 +145,7 @@ private int readPreamble() throws IOException { return count; case CONTINUE: // wait for the next preamble header - preambleBuffer.reset(); + preambleBuffer.clear(); return count; case CLOSE: return -1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMultipleServerPrincipalsIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMultipleServerPrincipalsIPC.java new file mode 100644 index 000000000000..237f1cb40259 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMultipleServerPrincipalsIPC.java @@ -0,0 +1,277 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import javax.security.sasl.SaslException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.security.SecurityInfo; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hbase.thirdparty.io.netty.handler.codec.DecoderException; + +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; + +/** + * Tests for HBASE-28321, where we have multiple server principals candidates for a rpc service. + *

+ * Put here just because we need to visit some package private classes under this package. + */ +@RunWith(Parameterized.class) +@Category({ SecurityTests.class, MediumTests.class }) +public class TestMultipleServerPrincipalsIPC { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMultipleServerPrincipalsIPC.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); + + private static MiniKdc KDC; + private static String HOST = "localhost"; + private static String SERVER_PRINCIPAL; + private static String SERVER_PRINCIPAL2; + private static String CLIENT_PRINCIPAL; + + @Parameter(0) + public Class rpcServerImpl; + + @Parameter(1) + public Class rpcClientImpl; + + private Configuration clientConf; + private Configuration serverConf; + private UserGroupInformation clientUGI; + private UserGroupInformation serverUGI; + private RpcServer rpcServer; + private RpcClient rpcClient; + + @Parameters(name = "{index}: rpcServerImpl={0}, rpcClientImpl={1}") + public static List params() { + List params = new ArrayList<>(); + List> rpcServerImpls = + Arrays.asList(NettyRpcServer.class, SimpleRpcServer.class); + List> rpcClientImpls = + Arrays.asList(NettyRpcClient.class, BlockingRpcClient.class); + for (Class rpcServerImpl : rpcServerImpls) { + for (Class rpcClientImpl : rpcClientImpls) { + params.add(new Object[] { rpcServerImpl, rpcClientImpl }); + } + } + return params; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + KDC = TEST_UTIL.setupMiniKdc(KEYTAB_FILE); + SERVER_PRINCIPAL = "server/" + HOST + "@" + KDC.getRealm(); + SERVER_PRINCIPAL2 = "server2/" + HOST + "@" + KDC.getRealm(); + CLIENT_PRINCIPAL = "client"; + KDC.createPrincipal(KEYTAB_FILE, CLIENT_PRINCIPAL, SERVER_PRINCIPAL, SERVER_PRINCIPAL2); + setSecuredConfiguration(TEST_UTIL.getConfiguration()); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxbackoff", 1); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxretries", 0); + TEST_UTIL.getConfiguration().setInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, 10); + } + + @AfterClass + public static void tearDownAfterClass() { + if (KDC != null) { + KDC.stop(); + } + } + + private static void setSecuredConfiguration(Configuration conf) { + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos"); + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); + } + + private void loginAndStartRpcServer(String principal, int port) throws Exception { + UserGroupInformation.setConfiguration(serverConf); + serverUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, + KEYTAB_FILE.getCanonicalPath()); + rpcServer = serverUGI.doAs((PrivilegedExceptionAction< + RpcServer>) () -> RpcServerFactory.createRpcServer(null, getClass().getSimpleName(), + Lists.newArrayList( + new RpcServer.BlockingServiceAndInterface(TestProtobufRpcServiceImpl.SERVICE, null)), + new InetSocketAddress(HOST, port), serverConf, new FifoRpcScheduler(serverConf, 1))); + rpcServer.start(); + } + + @Before + public void setUp() throws Exception { + clientConf = new Configuration(TEST_UTIL.getConfiguration()); + clientConf.setClass(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, rpcClientImpl, + RpcClient.class); + String serverPrincipalConfigName = "hbase.test.multiple.principal.first"; + String 
serverPrincipalConfigName2 = "hbase.test.multiple.principal.second"; + clientConf.set(serverPrincipalConfigName, SERVER_PRINCIPAL); + clientConf.set(serverPrincipalConfigName2, SERVER_PRINCIPAL2); + serverConf = new Configuration(TEST_UTIL.getConfiguration()); + serverConf.setClass(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl, + RpcServer.class); + SecurityInfo securityInfo = new SecurityInfo(Kind.HBASE_AUTH_TOKEN, serverPrincipalConfigName2, + serverPrincipalConfigName); + SecurityInfo.addInfo(TestProtobufRpcProto.getDescriptor().getName(), securityInfo); + + UserGroupInformation.setConfiguration(clientConf); + clientUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL, + KEYTAB_FILE.getCanonicalPath()); + loginAndStartRpcServer(SERVER_PRINCIPAL, 0); + rpcClient = clientUGI.doAs((PrivilegedExceptionAction) () -> RpcClientFactory + .createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())); + } + + @After + public void tearDown() throws IOException { + Closeables.close(rpcClient, true); + rpcServer.stop(); + } + + private String echo(String msg) throws Exception { + return clientUGI.doAs((PrivilegedExceptionAction) () -> { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + ServerName.valueOf(HOST, rpcServer.getListenerAddress().getPort(), -1), User.getCurrent(), + 10000); + TestProtobufRpcProto.BlockingInterface stub = TestProtobufRpcProto.newBlockingStub(channel); + return stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage(msg).build()) + .getMessage(); + }); + } + + @Test + public void testEcho() throws Exception { + String msg = "Hello World"; + assertEquals(msg, echo(msg)); + } + + @Test + public void testMaliciousServer() throws Exception { + // reset the server principals so the principal returned by server does not match + SecurityInfo securityInfo = + SecurityInfo.getInfo(TestProtobufRpcProto.getDescriptor().getName()); + for (int i = 0; i < securityInfo.getServerPrincipals().size(); i++) { + clientConf.set(securityInfo.getServerPrincipals().get(i), + "valid_server_" + i + "/" + HOST + "@" + KDC.getRealm()); + } + UndeclaredThrowableException error = + assertThrows(UndeclaredThrowableException.class, () -> echo("whatever")); + assertThat(error.getCause(), instanceOf(ServiceException.class)); + assertThat(error.getCause().getCause(), instanceOf(SaslException.class)); + } + + @Test + public void testRememberLastSucceededServerPrincipal() throws Exception { + // after this call we will remember the last succeeded server principal + assertEquals("a", echo("a")); + // shutdown the connection, but does not remove it from pool + RpcConnection conn = + Iterables.getOnlyElement(((AbstractRpcClient) rpcClient).getConnections().values()); + conn.shutdown(); + // recreate rpc server with server principal2 + int port = rpcServer.getListenerAddress().getPort(); + rpcServer.stop(); + serverUGI.logoutUserFromKeytab(); + loginAndStartRpcServer(SERVER_PRINCIPAL2, port); + // this time we will still use the remembered server principal, so we will get a sasl exception + UndeclaredThrowableException error = + assertThrows(UndeclaredThrowableException.class, () -> echo("a")); + assertThat(error.getCause(), instanceOf(ServiceException.class)); + // created by IPCUtil.wrap, to prepend the server address + assertThat(error.getCause().getCause(), instanceOf(IOException.class)); + // wraped IPCUtil.toIOE + assertThat(error.getCause().getCause().getCause(), instanceOf(IOException.class)); + Throwable cause = 
error.getCause().getCause().getCause().getCause(); + // for netty rpc client, it is DecoderException, for blocking rpc client, it is already + // RemoteExcetion + assertThat(cause, + either(instanceOf(DecoderException.class)).or(instanceOf(RemoteException.class))); + RemoteException rme; + if (!(cause instanceof RemoteException)) { + assertThat(cause.getCause(), instanceOf(RemoteException.class)); + rme = (RemoteException) cause.getCause(); + } else { + rme = (RemoteException) cause; + } + assertEquals(SaslException.class.getName(), rme.getClassName()); + // the above failure will clear the remembered server principal, so this time we will get the + // correct one. We use retry here just because a failure of sasl negotiation will trigger a + // relogin and it may take some time, and for netty based implementation the relogin is async + TEST_UTIL.waitFor(10000, () -> { + try { + echo("a"); + } catch (UndeclaredThrowableException e) { + Throwable t = e.getCause().getCause(); + assertThat(t, instanceOf(IOException.class)); + if (!(t instanceof FailedServerException)) { + // for netty rpc client + assertThat(e.getCause().getMessage(), + containsString(RpcConnectionConstants.RELOGIN_IS_IN_PROGRESS)); + } + return false; + } + return true; + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java index bc791754a12e..345514396d6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcSkipInitialSaslHandshake.java @@ -28,6 +28,7 @@ import java.io.File; import java.net.InetSocketAddress; +import java.util.Collections; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -125,8 +126,8 @@ public void setUpTest() throws Exception { @Test public void test() throws Exception { SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); - Mockito.when(securityInfoMock.getServerPrincipal()) - .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + Mockito.when(securityInfoMock.getServerPrincipals()) + .thenReturn(Collections.singletonList(HBaseKerberosUtils.KRB_PRINCIPAL)); SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); final AtomicReference conn = new AtomicReference<>(null); @@ -152,7 +153,6 @@ protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channe .getMessage(); assertTrue("test".equals(response)); assertFalse(conn.get().useSasl); - } finally { rpcServer.stop(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecurityRpcSentBytesMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecurityRpcSentBytesMetrics.java index b5e46b5c7cf5..a74477bf28c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecurityRpcSentBytesMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecurityRpcSentBytesMetrics.java @@ -27,6 +27,7 @@ import java.io.File; import java.net.InetSocketAddress; +import java.util.Collections; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -122,8 +123,8 @@ public void setUpTest() throws Exception { @Test public void test() throws Exception { SecurityInfo securityInfoMock = 
Mockito.mock(SecurityInfo.class); - Mockito.when(securityInfoMock.getServerPrincipal()) - .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + Mockito.when(securityInfoMock.getServerPrincipals()) + .thenReturn(Collections.singletonList(HBaseKerberosUtils.KRB_PRINCIPAL)); SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); NettyRpcServer rpcServer = new NettyRpcServer(null, getClass().getSimpleName(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java index 998896c94685..31e01a98ad69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java @@ -104,7 +104,7 @@ protected static void initKDCAndConf() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxbackoff", 100); } - protected static void stopKDC() throws InterruptedException { + protected static void stopKDC() { if (KDC != null) { KDC.stop(); } @@ -192,8 +192,8 @@ public static class CanonicalHostnameTestingAuthenticationProviderSelector return new SaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, - boolean fallbackAllowed, Map saslProps) throws IOException { + String serverPrincipal, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { final String s = conf.get(CANONICAL_HOST_NAME_KEY); if (s != null) { try { @@ -206,7 +206,7 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, } } - return delegate.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, + return delegate.createClient(conf, serverAddr, serverPrincipal, token, fallbackAllowed, saslProps); } @@ -385,8 +385,8 @@ private void setCryptoAES(String clientCryptoAES, String serverCryptoAES) { */ private void callRpcService(User serverUser, User clientUser) throws Exception { SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); - Mockito.when(securityInfoMock.getServerPrincipal()) - .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + Mockito.when(securityInfoMock.getServerPrincipals()) + .thenReturn(Collections.singletonList(HBaseKerberosUtils.KRB_PRINCIPAL)); SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); InetSocketAddress isa = new InetSocketAddress(HOST, 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMultipleServerPrincipalsFallbackToSimple.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMultipleServerPrincipalsFallbackToSimple.java new file mode 100644 index 000000000000..6f1cc148204a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMultipleServerPrincipalsFallbackToSimple.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.BlockingRpcClient; +import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; +import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; +import org.apache.hadoop.hbase.ipc.NettyRpcClient; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; + +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; + +/** + * Test secure client connecting to a non secure server, where we have multiple server principal + * candidates for a rpc service. See HBASE-28321. 
+ */ +@RunWith(Parameterized.class) +@Category({ SecurityTests.class, MediumTests.class }) +public class TestMultipleServerPrincipalsFallbackToSimple { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMultipleServerPrincipalsFallbackToSimple.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); + + private static MiniKdc KDC; + private static String HOST = "localhost"; + private static String SERVER_PRINCIPAL; + private static String SERVER_PRINCIPAL2; + private static String CLIENT_PRINCIPAL; + + @Parameter + public Class rpcClientImpl; + + private Configuration clientConf; + private UserGroupInformation clientUGI; + private RpcServer rpcServer; + private RpcClient rpcClient; + + @Parameters(name = "{index}: rpcClientImpl={0}") + public static List params() { + return Arrays.asList(new Object[] { NettyRpcClient.class }, + new Object[] { BlockingRpcClient.class }); + } + + private static void setSecuredConfiguration(Configuration conf) { + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos"); + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + KDC = TEST_UTIL.setupMiniKdc(KEYTAB_FILE); + SERVER_PRINCIPAL = "server/" + HOST; + SERVER_PRINCIPAL2 = "server2/" + HOST; + CLIENT_PRINCIPAL = "client"; + KDC.createPrincipal(KEYTAB_FILE, CLIENT_PRINCIPAL, SERVER_PRINCIPAL, SERVER_PRINCIPAL2); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxbackoff", 1); + TEST_UTIL.getConfiguration().setInt("hbase.security.relogin.maxretries", 0); + TEST_UTIL.getConfiguration().setInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, 10); + } + + @Before + public void setUp() throws Exception { + clientConf = new Configuration(TEST_UTIL.getConfiguration()); + setSecuredConfiguration(clientConf); + clientConf.setClass(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, rpcClientImpl, + RpcClient.class); + String serverPrincipalConfigName = "hbase.test.multiple.principal.first"; + String serverPrincipalConfigName2 = "hbase.test.multiple.principal.second"; + clientConf.set(serverPrincipalConfigName, "server/localhost@" + KDC.getRealm()); + clientConf.set(serverPrincipalConfigName2, "server2/localhost@" + KDC.getRealm()); + SecurityInfo securityInfo = new SecurityInfo(Kind.HBASE_AUTH_TOKEN, serverPrincipalConfigName2, + serverPrincipalConfigName); + SecurityInfo.addInfo(TestProtobufRpcProto.getDescriptor().getName(), securityInfo); + + UserGroupInformation.setConfiguration(clientConf); + clientUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL, + KEYTAB_FILE.getCanonicalPath()); + + rpcServer = RpcServerFactory.createRpcServer(null, getClass().getSimpleName(), + Lists.newArrayList( + new RpcServer.BlockingServiceAndInterface(TestProtobufRpcServiceImpl.SERVICE, null)), + new InetSocketAddress(HOST, 0), TEST_UTIL.getConfiguration(), + new FifoRpcScheduler(TEST_UTIL.getConfiguration(), 1)); + rpcServer.start(); + } + + @After + public void tearDown() throws IOException { + Closeables.close(rpcClient, true); + rpcServer.stop(); + } + + private RpcClient createClient() throws Exception { + return clientUGI.doAs((PrivilegedExceptionAction) () -> RpcClientFactory + .createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())); 
+ } + + private String echo(String msg) throws Exception { + return clientUGI.doAs((PrivilegedExceptionAction) () -> { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + ServerName.valueOf(HOST, rpcServer.getListenerAddress().getPort(), -1), User.getCurrent(), + 10000); + TestProtobufRpcProto.BlockingInterface stub = TestProtobufRpcProto.newBlockingStub(channel); + return stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage(msg).build()) + .getMessage(); + }); + } + + @Test + public void testAllowFallbackToSimple() throws Exception { + clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true); + rpcClient = createClient(); + assertEquals("allow", echo("allow")); + } + + @Test + public void testDisallowFallbackToSimple() throws Exception { + clientConf.setBoolean(RpcClient.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, false); + rpcClient = createClient(); + UndeclaredThrowableException error = + assertThrows(UndeclaredThrowableException.class, () -> echo("disallow")); + Throwable cause = error.getCause().getCause().getCause(); + assertThat(cause, instanceOf(FallbackDisallowedException.class)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPCRejectPlainText.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPCRejectPlainText.java index a6984fcdf3a8..ea9b6948011d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPCRejectPlainText.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPCRejectPlainText.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.setSecuredConfiguration; import java.io.File; +import java.util.Collections; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl; @@ -66,8 +67,8 @@ public static void setUpBeforeClass() throws Exception { UGI = loginKerberosPrincipal(KEYTAB_FILE.getCanonicalPath(), PRINCIPAL); setSecuredConfiguration(util.getConfiguration()); SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); - Mockito.when(securityInfoMock.getServerPrincipal()) - .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + Mockito.when(securityInfoMock.getServerPrincipals()) + .thenReturn(Collections.singletonList(HBaseKerberosUtils.KRB_PRINCIPAL)); SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java index feba17364cc4..66b65ba03f04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java @@ -75,7 +75,6 @@ import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.HBaseKerberosUtils; import org.apache.hadoop.hbase.security.SaslUtil; -import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.token.SecureTestCluster; import org.apache.hadoop.hbase.security.token.TokenProvider; @@ -202,7 +201,7 @@ public static class InMemoryClientProvider extends 
AbstractSaslClientAuthenticat @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + String serverPrincipal, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { MECHANISM }, null, null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new InMemoryClientProviderCallbackHandler(token)); From 4b552434dbd9950aee28f183caba568c585c6f2a Mon Sep 17 00:00:00 2001 From: Rushabh Shah Date: Tue, 27 Feb 2024 08:49:34 -0800 Subject: [PATCH 263/514] HBASE-28391 Remove the need for ADMIN permissions for listDecommissionedRegionServers (#5695) --- .../apache/hadoop/hbase/security/access/AccessController.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 66a7b3a27032..563470f9404d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1206,7 +1206,7 @@ public void preDecommissionRegionServers(ObserverContext ctx) throws IOException { - requirePermission(ctx, "listDecommissionedRegionServers", Action.ADMIN); + requirePermission(ctx, "listDecommissionedRegionServers", Action.READ); } @Override From 14cb496433e5fb41096a590c49544b87c9099ee5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 28 Feb 2024 22:57:32 +0800 Subject: [PATCH 264/514] HBASE-28384 Client ingegration tests fails for branch-2/branch-2.6 (#5714) Signed-off-by: Yi Mei Signed-off by: Nick Dimiduk (cherry picked from commit 1c609f7ae406b11338fa4bb92635b4dee374152a) Forward cherry-pick to align the Jenkinsfile and build scripts --- dev-support/Jenkinsfile | 32 +++++++-- dev-support/hbase_nightly_source-artifact.sh | 75 +++++++++++++++----- 2 files changed, 84 insertions(+), 23 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index a767775b36b4..19256ccb9b12 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -657,6 +657,8 @@ pipeline { rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball" rm -rf "hbase-install" && mkdir "hbase-install" rm -rf "hbase-client" && mkdir "hbase-client" + rm -rf "hbase-hadoop3-install" + rm -rf "hbase-hadoop3-client" rm -rf "hadoop-2" && mkdir "hadoop-2" rm -rf "hadoop-3" && mkdir "hadoop-3" rm -rf ".m2-for-repo" && mkdir ".m2-for-repo" @@ -691,14 +693,23 @@ pipeline { """ echo "unpacking the hbase bin tarball into 'hbase-install' and the client tarball into 'hbase-client'" sh '''#!/bin/bash -e - if [ 2 -ne $(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | wc -l) ]; then + if [ 2 -ne $(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | grep -v hadoop3 | wc -l) ]; then echo '(x) {color:red}-1 testing binary artifact{color}\n-- source tarball did not produce the expected binaries.' 
>>output-srctarball/commentfile exit 1 fi - install_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | sort | head -n 1) + install_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | grep -v client-bin | grep -v hadoop3) tar --strip-component=1 -xzf "${install_artifact}" -C "hbase-install" - client_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | sort | tail -n 1) + client_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-client-bin.tar.gz | grep -v hadoop3) tar --strip-component=1 -xzf "${client_artifact}" -C "hbase-client" + if [ 2 -eq $(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz | wc -l) ]; then + echo "hadoop3 artifacts available, unpacking the hbase hadoop3 bin tarball into 'hbase-hadoop3-install' and the client hadoop3 tarball into 'hbase-hadoop3-client'" + mkdir hbase-hadoop3-install + mkdir hbase-hadoop3-client + hadoop3_install_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz | grep -v client-bin) + tar --strip-component=1 -xzf "${hadoop3_install_artifact}" -C "hbase-hadoop3-install" + hadoop3_client_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-client-bin.tar.gz) + tar --strip-component=1 -xzf "${hadoop3_client_artifact}" -C "hbase-hadoop3-client" + fi ''' unstash 'hadoop-2' sh '''#!/bin/bash -xe @@ -731,11 +742,18 @@ pipeline { tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3" # we need to patch some files otherwise minicluster will fail to start, see MAPREDUCE-7471 ${BASEDIR}/dev-support/patch-hadoop3.sh hadoop-3 + hbase_install_dir="hbase-install" + hbase_client_dir="hbase-client" + if [ -d "hbase-hadoop3-install" ]; then + echo "run hadoop3 client integration test against hbase hadoop3 binaries" + hbase_install_dir="hbase-hadoop3-install" + hbase_client_dir="hbase-hadoop3-client" + fi if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \ --single-process \ --working-dir output-integration/hadoop-3 \ - --hbase-client-install hbase-client \ - hbase-install \ + --hbase-client-install ${hbase_client_dir} \ + ${hbase_install_dir} \ hadoop-3/bin/hadoop \ hadoop-3/share/hadoop/yarn/timelineservice \ hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \ @@ -750,8 +768,8 @@ pipeline { --single-process \ --hadoop-client-classpath hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar \ --working-dir output-integration/hadoop-3-shaded \ - --hbase-client-install hbase-client \ - hbase-install \ + --hbase-client-install ${hbase_client_dir} \ + ${hbase_install_dir} \ hadoop-3/bin/hadoop \ hadoop-3/share/hadoop/yarn/timelineservice \ hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \ diff --git a/dev-support/hbase_nightly_source-artifact.sh b/dev-support/hbase_nightly_source-artifact.sh index 1698fd652cd8..79c62ca77434 100755 --- a/dev-support/hbase_nightly_source-artifact.sh +++ b/dev-support/hbase_nightly_source-artifact.sh @@ -16,7 +16,6 @@ # specific language governing permissions and limitations # under the License. -set -e function usage { echo "Usage: ${0} [options] /path/to/component/checkout" echo "" @@ -169,20 +168,64 @@ else echo "Everything looks as expected." 
fi +function get_hadoop3_version { + local version="$1" + if [[ "${version}" =~ -SNAPSHOT$ ]]; then + echo "${version/-SNAPSHOT/-hadoop3-SNAPSHOT}" + else + echo "${version}-hadoop3" + fi +} + +function build_tarball { + local build_hadoop3=$1 + local mvn_extra_args="" + local build_log="srctarball_install.log" + local tarball_glob="hbase-*-bin.tar.gz" + if [ $build_hadoop3 -ne 0 ]; then + local version=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) + local hadoop3_version=$(get_hadoop3_version $version) + mvn_extra_args="-Drevision=${hadoop3_version} -Dhadoop.profile=3.0" + build_log="hadoop3_srctarball_install.log" + tarball_glob="hbase-*-hadoop3-*-bin.tar.gz" + echo "Follow the ref guide section on making a RC: Step 8 Build the hadoop3 binary tarball." + else + echo "Follow the ref guide section on making a RC: Step 8 Build the binary tarball." + fi + if mvn --threads=2 -DskipTests -Prelease --batch-mode -Dmaven.repo.local="${m2_tarbuild}" ${mvn_extra_args} clean install \ + assembly:single >"${working_dir}/${build_log}" 2>&1; then + for artifact in "${unpack_dir}"/hbase-assembly/target/${tarball_glob}; do + if [ -f "${artifact}" ]; then + # TODO check the layout of the binary artifact we just made. + echo "Building a binary tarball from the source tarball succeeded." + return 0 + fi + done + fi + + echo "Building a binary tarball from the source tarball failed. see ${working_dir}/${build_log} for details." + # Copy up the rat.txt to the working dir so available in build archive in case rat complaints. + # rat.txt can be under any module target dir... copy them all up renaming them to include parent dir as we go. + find ${unpack_dir} -name rat.txt -type f | while IFS= read -r NAME; do cp -v "$NAME" "${working_dir}/${NAME//\//_}"; done + return 1 +} + cd "${unpack_dir}" -echo "Follow the ref guide section on making a RC: Step 8 Build the binary tarball." -if mvn --threads=2 -DskipTests -Prelease --batch-mode -Dmaven.repo.local="${m2_tarbuild}" clean install \ - assembly:single >"${working_dir}/srctarball_install.log" 2>&1; then - for artifact in "${unpack_dir}"/hbase-assembly/target/hbase-*-bin.tar.gz; do - if [ -f "${artifact}" ]; then - # TODO check the layout of the binary artifact we just made. - echo "Building a binary tarball from the source tarball succeeded." - exit 0 - fi - done + +build_tarball 0 +if [ $? -ne 0 ]; then + exit 1 +fi + +mvn help:active-profiles | grep -q hadoop-3.0 +if [ $? -ne 0 ]; then + echo "The hadoop-3.0 profile is not activated by default, build a hadoop3 tarball." + # move the previous tarballs out, so it will not be cleaned while building against hadoop3 + mv "${unpack_dir}"/hbase-assembly/target/hbase-*-bin.tar.gz "${unpack_dir}"/ + build_tarball 1 + if [ $? -ne 0 ]; then + exit 1 + fi + # move tarballs back + mv "${unpack_dir}"/hbase-*-bin.tar.gz "${unpack_dir}"/hbase-assembly/target/ fi -echo "Building a binary tarball from the source tarball failed. see ${working_dir}/srctarball_install.log for details." -# Copy up the rat.txt to the working dir so available in build archive in case rat complaints. -# rat.txt can be under any module target dir... copy them all up renaming them to include parent dir as we go. 
-find ${unpack_dir} -name rat.txt -type f | while IFS= read -r NAME; do cp -v "$NAME" "${working_dir}/${NAME//\//_}"; done -exit 1 From a1f09d1bb803c59da6440b8c562b9be3b8e65b38 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 28 Feb 2024 23:12:11 +0800 Subject: [PATCH 265/514] HBASE-28321 Addendum change deprecation cycle --- .../java/org/apache/hadoop/hbase/security/SecurityInfo.java | 3 +-- .../security/provider/SaslClientAuthenticationProvider.java | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index a33f49573dee..561b6931c057 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -97,8 +97,7 @@ public SecurityInfo(Kind tokenKind, String... serverPrincipal) { * Although this class is IA.Private, we leak this class in * {@code SaslClientAuthenticationProvider}, so need to align with the deprecation cycle for that * class. - * @deprecated Since 2.5.8 and 2.6.0, will be removed in 4.0.0. Use {@link #getServerPrincipals()} - * instead. + * @deprecated Since 2.6.0, will be removed in 4.0.0. Use {@link #getServerPrincipals()} instead. */ @Deprecated public String getServerPrincipal() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index 4e23247ca764..a1a22a70f0ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -47,8 +47,8 @@ public interface SaslClientAuthenticationProvider extends SaslAuthenticationProv /** * Creates the SASL client instance for this authentication method. - * @deprecated Since 2.5.8 and 2.6.0. In our own code will not call this method any more, - * customized authentication method should implement + * @deprecated Since 2.6.0. In our own code will not call this method any more, customized + * authentication method should implement * {@link #createClient(Configuration, InetAddress, String, Token, boolean, Map)} * instead. Will be removed in 4.0.0. 
*/ From a997301d39f525f97bce82adf67ce7ebcda36006 Mon Sep 17 00:00:00 2001 From: xxishu <71439900+xxishu@users.noreply.github.com> Date: Thu, 29 Feb 2024 14:18:58 +0800 Subject: [PATCH 266/514] HBASE-28313 StorefileRefresherChore should not refresh readonly table (#5641) Co-authored-by: sunhao5 Signed-off-by: Duo Zhang --- .../regionserver/StorefileRefresherChore.java | 10 +++- .../TestStoreFileRefresherChore.java | 52 ++++++++++++++++++- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 40108e346d1c..1111e72bcf76 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; @@ -81,8 +82,13 @@ public StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServe @Override protected void chore() { for (Region r : regionServer.getOnlineRegionsLocalContext()) { - if (!r.isReadOnly()) { - // skip checking for this region if it can accept writes + if ( + !r.isReadOnly() || r.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID + || r.getTableDescriptor().isReadOnly() + ) { + // Skip checking for this region if it can accept writes. + // The refresher is only for refreshing secondary replicas. And if the table is readonly, + // meaning no writes to the primary replica, skip checking the secondary replicas as well. continue; } // don't refresh unless enabled for all files, or it the meta region diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 5f900e707d38..93b2a87545b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -81,8 +81,13 @@ public void setUp() throws IOException { private TableDescriptor getTableDesc(TableName tableName, int regionReplication, byte[]... families) { - TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); + return getTableDesc(tableName, regionReplication, false, families); + } + + private TableDescriptor getTableDesc(TableName tableName, int regionReplication, boolean readOnly, + byte[]... 
families) { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) + .setRegionReplication(regionReplication).setReadOnly(readOnly); Arrays.stream(families).map(family -> ColumnFamilyDescriptorBuilder.newBuilder(family) .setMaxVersions(Integer.MAX_VALUE).build()).forEachOrdered(builder::setColumnFamily); return builder.build(); @@ -235,4 +240,47 @@ public void testIsStale() throws IOException { // expected } } + + @Test + public void testRefreshReadOnlyTable() throws IOException { + int period = 0; + byte[][] families = new byte[][] { Bytes.toBytes("cf") }; + byte[] qf = Bytes.toBytes("cq"); + + HRegionServer regionServer = mock(HRegionServer.class); + List regions = new ArrayList<>(); + when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); + when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); + + TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families); + HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); + HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); + regions.add(primary); + regions.add(replica1); + + StorefileRefresherChore chore = + new StorefileRefresherChore(period, false, regionServer, new StoppableImplementation()); + + // write some data to primary and flush + putData(primary, 0, 100, qf, families); + primary.flush(true); + verifyData(primary, 0, 100, qf, families); + + verifyDataExpectFail(replica1, 0, 100, qf, families); + chore.chore(); + verifyData(replica1, 0, 100, qf, families); + + // write some data to primary and flush before refresh the store files for the replica + putData(primary, 100, 100, qf, families); + primary.flush(true); + verifyData(primary, 0, 200, qf, families); + + // then the table is set to readonly + htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, true, families); + primary.setTableDescriptor(htd); + replica1.setTableDescriptor(htd); + + chore.chore(); // we cannot refresh the store files + verifyDataExpectFail(replica1, 100, 100, qf, families); + } } From cde1c9c66b10718ddfb2f0539cccdb8c8bfe99e0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 29 Feb 2024 15:16:31 +0800 Subject: [PATCH 267/514] HBASE-28394 Attach the design doc for preamble call to our code base (#5716) Signed-off-by: Nihal Jain --- .../RpcConnectionRegistry and Security.pdf | Bin 0 -> 69697 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 dev-support/design-docs/RpcConnectionRegistry and Security.pdf diff --git a/dev-support/design-docs/RpcConnectionRegistry and Security.pdf b/dev-support/design-docs/RpcConnectionRegistry and Security.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8361010a2737578a36d09dfb6c2cf29c0bc85582 GIT binary patch literal 69697 zcmb@sRd5_Zl%^|Y$zo>a7Be$5GqsqRWieUQVrFJ)v1M6oF*B3J47TWcW;SAGZ*1(- z)4JA;S_(!bOHn1%DdXR|aXXGa1^D3b$JT-=?^9sfIs z`@e%E#s4Gdze1HH$=Q$qN(%qk>XNgwa{kxBiv&tW2-2dG7$@B(_Un!B1?+5%Z!+-(5%cJAf?3wIYE4}=c||F28ncft&P7d)1?k0XLc_H{0~Sm)wz$i(2M?>yM_MG z_leIyMDD`i_rLv5RKky`u?_EqkUTpjnFcm&)h2Ni601q6jNNEuDM$)eOq)o$mX zA7rYZ*p|OO-ySwXg7%0hS3pC~e&3T9PND2}8Po2s@ydiut0>kyUXGyfuGKGu7#A*J z?OIEJJ$i~u?&(yOWeNuK8&?%q9j~7KblsAF`y5pM>%Ld%0$7{~-*<5yn|+op$wZzB zr$o}Xh<|h6`$N+I;T?2cmLp-OyzcrXRtaDk8AU#CTF;-*aqD+oE|xYt(%WR-E4YdH z=mY9_Hp$ri8CqHOeqbRvEu480%hWjCHzlP)c$q(gHG(N;(R?jLw*2;Z@38dN;_$tp zJYuAT;XD^nv5`(G_>uMPQ21OZou$p7hUF%=)n_l`*o4Ao>~WpTCP3Js^#Ey-GoRNs zhDt$Y9)udy-~2WKy+!^!~Czccd&?QkrMWKF3YPsA8UWEQ>Lq3+K45e 
zqIDbvZ?kc}$;|VHcpjbC{!hPEJl3@P+fL>#>LCS#dm;*3(-h>jIM#1Z%-MjzF44`x zUgWDLYtftDcc($H9(Bk)Nu%`rQF*0AXuARlpik7+Cv*#VYd!f+tbVP|(WN`fIY667 zv;k?Fl@->1tZI-vU;gnD>*pO^@ciJ4k}j!*Ymoi@C2*f_wPTU6e~s48 z`}5_*xY0QknYXj_&V_(DdCsZ=Q@IbZMOwz)>7~Z{PRs!AFf>HR*iWx-U<&EYPao!L zBN3}LS-j=;xo6%qw*&7bq^TKf9;#_oSX}1|M$g@V zHu_4+3c75z2wQCnCBDEXml$X;vz$1Ki!~JNcf=U-#5MqZs*p&-RUatH2YeEAQ~JuM z^6~7?=0$irV|kKf*OkZS)^I89JuGU89!0a-lcDvZ--Kogw57m=aW{S)HdgV0_Racj z<2bp5CfBTdTYx~PA=hkW7HfnIG-3E#8`Ro&PIeC?(T`b{js%enjlQ}O{_(BL=HY8m zPgj!lHzZBAR~6u%FDV=S0XOkl4Wu^bO9#|ARc)&&s$V07dhyy1BaQg47B+6P*JEh} z3F4)pLaie{zorK_@>juv!*IK4UNDUOT=sca=e&8M3A~>CUxOucwQ{qQ`qHqVh&XCHvl(Q`T1EXf!(pPHL!xev{Xbj>8rIeCRdvxaC8tMx@l zdyu^h(?Su|yraHbz(Wd_6(azxwqvzNd) zyTpG>kmK|s>Ub;Sk!2O9w2}BR$`YNeb_;lI-^y;F<_P@Lv>*@vfyD zbLCI3kgH;lJo7gp=)z5NSe^_A%-}=ob5z@)AyawePs3AWLkR)_Dy2?_D;ek}7m(au zTVF)c>@(F2fKru#RvQ~4R9`Lrgzn~`avvIC@BQj&u_=Rz&SKA*9)s2B>`>(GDX!}L zx%-^u9nu|TRSpyIJ-y^3dzWfFnzkFfm)W#)40fNBX@?CyVcj&s^6?!}mrh!hE+A5Wk;^-wRwDcO08q zsdnenJ$FgF!1M~Cird70L{*50;C~7LB?u{_w^H3)3iX9?_?l3g!msWz_}wit``2&f zF~WA9i%2(fjGlR7K3ku0?oW}RH-htoS^MY_BozKAsdk&1N2b}EVNfI68DcZ1lkRF! zFP)_FzYTIk?;!EAo#y1O=}E{1dZg|&XmU&~O7b;q5c_m_F=b6)V)zZa`9ShitS)fc za&iKP1asFe@2$2=zuoWoFVAu;lrXuiXTsUP7Kkp}FQ=nWhxid&{OYZp9F=vRiV+{k z&WP(}KQrnI8Oue#2jrN{FfrQ$jZbRzvPa*}BRx-MaV_q3{%IWcpMrZ{a` zsB!4Iy}!3DqGBxt@s6v1%<-k^2!_J$*^pn zq%%6pq?Zcv2vyIFgtHD+h$U4%CYAJVJ<~2eL-bL6DqQ1_rvU~8D{<1qNrj0tjDswY z)G5NJj8pf>2bQNvn>#aY; zl3sGatT-g9IjxeF`aW#@k|WRL(G?`+#*NTdCl*d9ale%2$6+)RCL4P*Ry7VgvfyJG zg=&xCN1M={;!VA%e>KBP{O#Vf21nMunBI-}L29A|bXjDAp=f>y67fUTLOo%=+;bwJ z6pm)a!x<|7>!yy+O_Yy2)NSfToC;>WO#ZPiShnd}1Lq`#m+>0K^$ETFTr*ev#NmYA z6J*H4)1E9%{@jEttBW}HQ`{kRzqljZeuHq};fgu`;!omn?}LS>9>-mSG!83CmN+8t zi{wzUzx{q`A(~WYpbzgoqmN7v>zrg!`rV7YqXVB?t!YZrXs^> z6e+>Fke-N2D_5a%y&WzG&}&&d3p{ykX`a4m7otkFti@9HYF74{_YwiAf8&-A*N^(h zuk`d5G?cq~Tv!-s*vg}9OH@CHIo~S8a2R(#Ft+eke%544HCLeLD!eiS?g9q@fV%MyqEr7}ji0Ch;jhDFDlP3`{>Ym4S;~Fh&Eu=0D^=_v#lWJ@&;me~KCvL; z^-`o^8K<)))uBC^7U3fL^%jvQKSXDT>}(tVx`3(Z9Ry%3*Zf(Y^@-HCf0V$w_)rV| zl2;VYv|y9%(}#P~cDi%* z336UQXjl0QLA%2F9L^t%UAUHvRg%**t>OdtrdmV6MDeKmy~i_#ou!8kR87wH(+MY? 
zD_Vg-zVTW6iX`=Zd-_LuFw#A>fNEHGIjl21d`0zH?eK{b$}+87bUM7nwjxFk7KP^& z*6#8B_5i*z7bgu4_2s+sU+cV=ObJkqZcazvYC$G>0d6k#hSIep@{`X%8)Gkt-jfZ7 zc)X5#5iY+I#V8p)E_9T=|6Cj`x}t|=6>XGN2}w2cow1Ib;IMbx)~jEYiL9-K+PGaT z$qDQ9+`EuA`yb&}*G8F`7V$Vz*|ln@Y32juhPmNiT}F=E7UG+&r3viT!ja7D$qJ0{ z0mlE zA)Pjvx`vI*oJmr-o?LD{qaK(xh%WDy6ssY~+neB~A**BN5#Jmaf_8(y%JbFs>jDpJk@JWpD`lq`HD+JNEJ=`fC>o`NH z7pi-qFB4CB4ghl)?cn|fpAnQp`(5nwqh`VgK;JjNX*s?#;#o7 zJd1GuUn&6&WaRB9PMx24*HIe;%ew<-Yv(hZUBcI47bmN-fBL`=G8Va3vq9C3>-I6< z7R|7<%Qn_{0+Jcvi2!NOA%ahHp!N*DkWQm9r={Oox@@*nx+)aUc0idtVEG>V`?QO~2_-*v*-B^ZPTJ4vg8Y23I4iI+ z1&G{d)50+xm%ODIxz)clrqK<&7&tK0Z36uuECY*tUF zT$)C5*&w+TX@4XmtYzy@!)Ds5#ILZ~0#Y)jY;%0M?#4aI=3V*Alc+|+%Xy|>T zRFY8?ub=Cg&XyfWX^p-01iIHV4lV|aCbt-ph^>o@C82RcEgin&7Bll#;>$YQ9R%62 zYN=6iX$&uu6t4~LV^t$Rtg$IJUz%UNlMBr_E5n*r0+W6RD(W>YP?;cjdy%T(OcC4$ zTyZpA92K*}h-_M57kRdFpJb7vZSX9Y_0_~k6wLPUs-8K;Xoz?7Z!Y4w-n0Es8y-r` z#ikH)8gvGs@Y%`K?2wKb+|0cQ(UH9sj0iuC?L?CB&f(f}6Q?OdtYA~1?ZeZvftIAg zBz~hp#ASZ(B7|K$= zW|csnr*wz}l?2y`dHq{_9MT3_>8#?bXQ~NQAEAYAOo}&SV#z{xMCa4bQ)|)2Y+fch z{f#NP4!ZIK!BZBYo(-to~}igUvp^m zCVSTh1;cf^*xp5@i6k(K0N-K);lyU?tW6k;nrWV7Fk~r^9rszCe6X2U?EvOaGhTSQ zynT#O(KhM=#kk$c))ImzyGN(Avl>j!#QjQ!Zq25ZlM11tKu9@*9!SrmyR~3K{<&b% z+P&9ABX?+I>$ag$biU%eg?2o@L`vG^%6CwyQ)qR2wyu4Ii}XR4fwH1U@y&X1iRe+E z(4^bFp5K9?FV>7Xz%Z*5`*Ue)TT$x>%4_^(z2dn(b&#d#h=;`0YraeNdIR|cx6efy z*{}V04Cv+CIov;pWonDPIi!0m6|=Wnvb+H)Fe__-5-B|?EueO9$+a-pxqgM7N1-cnJ7CLYHg@f#uoBeSfxR5&o*RPK|DloVzSglVa1Oq`{i4G&dw)^Q>=fba84bkU{Jd|uc>5fr7)Jr1HpsbX2D#TyWJQ=Kd!9v$ z#wL9n2shc`a+_|>%}rZz6cvABHtaJuN(P#w*5+?-7S8^X%$8D|Ku0~lRq(3`lU{~m zO?5}Cd%sS2f06hbXur`0e8u0mgCgZ&!Mpfm*}Qd)2|^7g$h9`?K>KF;?k+LbMRoXM zRiQKiPAdHPh|~~t`nr8^a^Cp)eze2d*N7LCSbw5J>{mDrIpt#d@^*hu^!X*>R~-3& zkp2IKY5y1FXa66+6#j3Fmy4g5@BhbmovFGof}V9>;D0u;_Ckt(&7N{Bz?vjGgo7EA z1bxD5q;Z2Ss~4k{+c)ogy_#0neHy`KS$78^A5S;W(V~5R5_=yV!c?TeEjB{$ zqZ6FpgF{{}8~@4?x_@et{sYFLCL=g)Z3@!s2hShLJIOGt(pqN+&;24FtTOfeA#Wcu z(S-CE!y195?5}<|;J6N%?~JYmhau&!We@JrZHol}^eQ7u15><2#C)xEBdqD^ib4 zi1Re$%3$@7LUKFzo4%f1p7(#%|$p&BYk`nuYitH@~M@%w{xM`it){ z_c2z^##Qh{e%tgjOh(g%%9L^XD-$Oty%^35H>*IY@Z_O=6zlWH9%3-DjGYUY6%{|C zhp?bTYu12GKP62rS$`+^T3Llb6Q!(+Qejgwe)Q2K5a*;wVb%;SnH-Vv7{ua7P|sWVb-g{sUqO$^PX)<$>PA-h5xF8BFZ zfO;_8I7w3mLeD>u%-#PM%1R*;hi!2%d|CXq4q7m>z1*WhT=lNbLF)27u51KIwfgTBXyz<;XXuT**140v z60A6wm5A2l2W?tAtOwaGpOZgf1zRJh)F`O;ZsYz0XWoVxm&fNFHqDpdQ%ilE3T`YD9n_@E4>(lFJ&5 zP;R|!2r(5Jh;Zcw;z~F^rOYosGDg&t@7$#^6I?*>&!}7g_gNjR7P%E-o-C?j%(5(u zIHj8FDi54|WMXj-icB>sQ>tcL2skuBm%u*ZOxXoY7zg1*5dpUAzPzMaDO3gpS{&Kdm(8m$3eHVMpfX~GD2T(QmXRO? 
zQO2*k1NC!k-;~iTBmiQaM+gyX9V|<+h+8S`4P7hEY=@utULxHw*8c6edt4UuGN)v* zX2;B_L9SBv>sJ(FAg5z{DNOiL&FUy)#F>|iJeE1*YYB_d^$PG1w`Hy_7m$5oQ%f+l z*F^=GKv7u{gJloT-ht5!dv8@H%z!WyxEk~$z6OrcKF0pN8p_05itw94axrHto4+z} z5<`K6q|t?>Jz`dzccVN3L0$U$r}bu^ce^H1+bE5^oILj0zEeE(NuDvSK*#0> z@hx(L*GQx4?JoA>g$@0T?{YA0Jx?@_o7Yz8yV8_Y453uTfev?#?U8H%|1VnpUJ@Cv7V;LQnrI;T&6(KFO14u8ukAnkGt zlvq)RTHEa&{g?_C;Os0_xr$G2QGj;l4^9$SU(ko2gShgG&zqhJb(%sKl-DxMftlWx zATD0$60Z_OKHR<^?$ZE_TYy8G8M2&E7sxVRH`<#0f}_yPnD(W53#xl4nb`?ESy-e% z5w28qt}RIc!SAiUERFlxwG1QBiL4R$-u%Vpsn!N<3{vp?+2~JN%p@C9?LUyP92U*) zd^R2(TlA4s2VXUiBgRLt&PS9T{|?d&Z*x&wLn7VLH>_Mdd}^ME)SM$_=uyOFlPMfh zkH2Kn4C1Ve5fJ&zn-D_GsgID|u;(Ncz{JoBWo8r-#}^=L4VaUSsl-%6lJ>ly23Bm@ zjm0N36KZHNYXCM+!qVzYjF~f{GfP=Q#h&a0_7^Z|Jc76dZgB~WC)Sx7*jHTdsh##Z z1vnj({D;%0P9*oOf)HNSIAJfGP#LZzff=bm`r(Xl>M7ivYXe#2Ztok4b)I(yiNIeA zI(_CnrLSk>FO-xcJIzH{D=->+KL0-H)(XYBC2_{D#P<^=j9w4x{>{nH zG<}GVTb#y{+@NjhmWQ&%5Q$v2mr*0fhfTgBFVjeV_{YvK=NK)00$h&Pe-F1j5zSyQ zDMzKqfN!~kqsP0zKm96Re(KtqnTb5#$r^GD<&Y)f(S5sLr~s_irg%CQZY)f#Zk)kb z?wQPn;SYD5yq=+565BBX;l1jXY&&p=X!xfB=iTLEF+756!T3KbDx6Sa%0&uL))MjT znc1z$+a1~=jCTY9?Sh7c5F4qV*o2#nnNGgC! zBgDRuiT8YpVx~W-JfoHg`yZX>xVlbR0E=SjRY%o^#g1??=WAtREzuicx?4sq4oOVI zD2QVsMMW!&BrLh6PY~15F4o^=*aZY#opfCCbRCp_3I+>3cG$3SMH zi&lR7E{rLvlRi!`IW{e5q;^4ytYW<5I`uKrgHNx*JHCwV0_|sRda_<}2HbAIG)P2m z+;Wv4I+SM-W+*uH7pP5jl|~uC(#tLkFs89u>?&sUahN%K!@qREQE*#NgeFCqdOP$E9(6G?wI55vNiQnNcch4{Kl4vP*7*+V99X?tJW3F|c-;6RtvW0Jd z7odcDN8{PBQzkZW++q6EA{;tyO+S)fpSU8_AqrN;BZq^n%4ybOv`Xj;@H%wo+p=!R zy6dKSjSC5{^^6uJODO61p;feAk(z8G|DOapAcXc)@B`g?_j^ z?ZJ&DiS1Ev4a!BQ2|Q!P!0=mX@Xdei<0_;0MbguPwKClG)6BdX>*qwMMZ- ze?6ClqIelrXNWrmi%%PWN1?wfKdJnah(`}v|7(}A=@!#k-@fRjhI8gJCL#A)hUGqI z$2n(*Lr0gfGh)AP&u`JQe?35#@pX#maG$n$sE!Lys6q`3?u^u%PEpEvHAx{aOr6hf zrDXzzq@8IQ@DZ3GG9!R^)Xy!`jZ}ZfzQ_1g^4lR{>yYtio+4Lo_NSPC7f^iFRkxBo zF>qR^GNdNoPLlTy%Csg-Rk)nv+1soz|2*W^^kZd`1eMoZkwjrLe;ov7(W9s0^0zby z=k!oX;T@2oltmM->Ht4m%@RVnj3qxGu}2X-VwM{lJUrdzcRwVwN?S6ba~`XajU~#B zno^i#L4;YBlN2{w8Y%+mV5Di%9q;M69HT!Cb@;P=6CoiUzF@=KWx`AUBH!0HG&Ey$=b)!iQd!E2GvkwvE_z)()l z=J#B`=ss0uTQWmM_4b)+9Qp70Q$LEWMOWf>*L$Yj%{SIGFeN@ZjYD#cE|Y0uh^(G- zGz6fTDJCPsU*cu--!{mMIHCVO8HrZg^PdP5601c#mWHEY_OevN#n_NrFCuqBeX#;HQ< z*9_>$7JFFm$z0Ziu4N?a-juv4`2`Tmqk_e3Ce!-E$TEzF+bQqt#=j&sjB{JzIQD4s4r91WTUUd6giT*}t zRNhgF&Z*A|D0QROb>$O1Y^gOPsLUnF#kMbfh-Ke4>D0TQcer&wb8@BgA8!$9XR8<0 zfxR^QdwzQ!uZ6?k>^Nh@huieclSg_oH_kA@bSv+aQ&uobw&1rja*(WK?I=X>7V z^$ep0RX%6LR`b7SAd^^a8TNAb%%(nrtd6rJ655@AwmJEi>RJvz^^QE7Bzg(fTrvwI zCiofdHbuMF^)O+re}Uz4<1}K@I`)ki*jyg!yW^&Kvtxn?3xIqGBxs47!`?w%56qgz zrpK^^cl&d66VP19Zjw7C6csro_Y>dZI@3x*;HNsLbO&8_^0fXBwX-qDGC_56p2)Yv zeajPjIk*M%%8s6>hVaqH&1txL6eCPz3&g+91(CrPOcvfu!pu1+GTB2@Qi^P`3-kKJ z9P`<3w_}3&@EvsIITD11wh6XnnjEb92kB<5kxt;gR805P`l@&0YmRGg^?ReZ!JtzB z+-9qIQI5kp_!Cv{g=4X9%W3vVz^urH>gdcI+ zF*(eV+6|x}kQti<#qliC?196pYv#0SLSIZzzLPW}nu5RpBlfa|W6-54| zFQ@X;HI6Nd+oF-ZcI?BkSE?nNGv%i zXJnN%HoW3pWNd~I*C|dv-Hsd;eCMXZg(`yhNcG80V*{_Hb(W^1yO2+R;Re z+aXI^-Ql&n(~6ab6*+(13;V=q-`zs#sE4UVlVut=r^_yO@g zEByIINv+x_S>rdUNgRm7X@j1qJL5`b#C?AV(Le6#xp3@*KB4gAq;TisktNOZ<>6@O zi9-0X|ML}~I`esZ9TNC@Z+du2_W4{-)|ggEU-9;E-5>P&+VT1K208Lo^oi*9CgpwP zKBV`3nUG_r?vlCmr`rr892Qv1SkCTO@ z&-Y~s1_oK+>-2r7R5UcV8f9GhchXsRzMJ!;iIWU`IYT?YSHewAnkO_N)9@(0s&uVw z)~l02NI}V8-u`}q$zR|V&>MzMFYl4IlvBthN)S7Vp4YzzsA|c&aDKoUx3kpEW^p0A zu8EQagDm89Jw;sJ34fPhVo}6ZElM-40YYjI};U{~-E95HmE@ z{03=9);k_!ySExbHVgzT%lA5rt#VUy;!bj&i-p|te0RX$(P``v3x09 z%d6Pi7&0*s;JdHm1WX0NZbxmb>e`e*J&SkqT>c^AZ)(#zGyJO5C_bHC)pwlC<)LCK z>PwbHl$7LoPU((fgtdXUlAFg!!80xQ4}rfMr(QVobM-c6q$60RZo%$d@jpX=BhUtd 
z9LGhr!2xmaW&8oS8(Ly@jN^tFh688g@kbY|4I8leJsx7mggN!thukQ#b}R7_fzgq0 zTh=UEn4nXCiXZHRJ3`u=2dvx_st=h=qZzqTxis|Ya2M#GPNOG})1gjlX)%X>L<57_ zub!&z!z?Y#m$M(u(n4dFBaXT5XivnL==(_p4B4Yj^%obV0C9~$BMB0# zqn!QKv+5VIJu@h|>E*Wd08?@jCyd8C6A|37mt4d|dTb$&yp4$-* zU?OuacOjB;q0o?6K4TN>itI=r?{2M@Up8}tJ)GQRIrTR{=~@^WfYKPNHf;<>F0qcC zO16y%3wgdw*s#7Pidg!UdSs1Bq%od(Mys{mZPJdfSzAn7)g*}ZEie9<0ao!Q4k*o3 z>bFA~NEWoH8=hWiniB!i1X@)Gv8Q=)HUW_|_$X(0TzTR_){_Zq2jUQ0ZU+J>8m8aR z)|nmZyI0YT&~z>>ue7@m!70cjJq1v`;OVciIOACvZZe&EBUF%Vp#Lm0j-oR;eWwAt1u+tyO zDzk#(YMK4am2}DWp{shG&tBjKi~Zhm{p|TKJ^^*K8qlwece07sO8CV1i%H%ZkFNz2 zEc|S4tS&PJCM2DguM$TjeS19t)IG9qrVEMy+6~Tjn|55^BX1)vmbF|OYHE)^iKWei z2$gc+ENw0-zXj%Rc{%kddMOtlSU<+F1(PKhDD}5&C%=cU@Q(KvmmQ1MY|RR*Z_Y!t zV&2FZH$IQ0H7sBs+FT=Fof9VxtC?|cK(S#745EmdK!@$Ef5tC9uwq{ts~xXY_lomH zWxzsR>Rdw4VRP|RJzL`Qc^WHiJS*n%E|l~MQBnsyXuOaZ&TWYe#w@xDqej4L3u=|e zeBSW9^O3SovG8CSBd(Lue<^pXyrum8o4vFP%5ZI~o1X6%u>lLhk}Cm_hX#=vs2;uQ zT6BACGYz|`?eEl<5x`EO1@ooj{My#QtEo6G<(tPVo z`ej2dF1J*2EdqUsgR!$CIXA(dJh}P^_83pG1GRBaAVF%-%Sg~lRfk<48Dulcgd8E5 zG@wK;La;(I5z)jR88&Ki&UxErGUzmsA8pYodY1nhGmI`{6Aw zN$$dNUCi;%m37aJNf0)nHa$h&BpJ2$7v#r_1YU#Al#HJFA2O2c9tq=lEEBvM(M`n) zlZj6b&wnmaeSQROqPW`B>%~3>8!7bdM! z$_S|fPQYU=Rz36aLAG8xV#K7feJ_gE=H1#lft{c5Y{@N0t`;w^1g2mf%{tyU&hqv_ zb0|`DKs^FOs?&f_*_SOyo5?i3IPaFREhXw+`IBy!xE=L!KQfx*(R%kP5d5rlv}pAk z=#(qDfIn;v46CHC*9Fqzww&nd>XZkkhxZh~s=318^15$3(%WFczllu38Ahglpo?aSaXEFV1etKm0||P?T=gv_Kg%jn=LWljqVFE|Kp0;hj8sq%SuDF zX@{R9Gr#-6q3hns47mdco9=zD340d^Q)>1uVySUMK@0nkBtsfuFJnaAksIvYM>uI# z%8^i)m+{3`VKE=PWNpdzh>Wnmdcj+RUC9D#6RiBKSGc|0HhLOW`CNfBcZOtc^9ErG zqXlckBQeo)8EYlP!`61-E#wxdep=2f<|dAvZ)rb8VL4Z!jt#g}=Wd*L1pt{B{_sUx z@-i0&cpv8>r?P8wmtyv%f89T)defb1PVV~=xb4h(jz{Rqp-Rk79h|Szu@>9{G^}t$ zLbs+IPU_eT*X2G(x3^FJypD0}Vb5ls9KFVp5&6%28qwMDIJG;yZN?v4uB@0{?pKgK zar(o>NhXM-G*hLPKS-BqqvJnmm+V!qCB*zb7~RiQnvq;ghvTdHT6;HZ zU)BV7`^DXp(i8<%r&7TM@(n02N00k?U7{P}2qFkTzM6-0lg7#n*S2?v9Fs1JysLVi z4P`E2cnf+Zvc}euuCO2@O(wP1@yEkS8~!ECziD1l2EN5crikN07mF~xl;mZ?WYkj9j>aVBMZmjz zD2eFX*bEou+s#6#F9Hb9u_E7>|Iz}dyuY?RWGsX(#ZmJQIf^*&OOw+~P{kH!%sJ!^ za=MlC(m8LIC^fOnz`kco&W**#a(0l$1mM@&58PuXm_3uv_es9F>zaTktrf^o1-0vg zjt7YELj*i8D=i$m=UP_NCn=Ytpx9L!R96UK8iz}Q8jLF+(OOZK&FM=*8N4V8ryO2j zX7v`#M7B3KkGJZeM)blCm*JnBMP|Ido(P&bdMJ!g9atzZq=9n`yUQi|r_PZ#|E6I< zeA`{2AR{y0cB3CBpsC3ZQt$C&>GGBUa3nZp@EfxfUvbtXYbH#1emmK9c4aqAx-Mv7 zi)vPOAT3*-;ENDil&?Ww!ne?ffav(fI5d}6vV{~@+`W)B!nwLv-V&LB%GFU!yWkWGXu6q z#Al0g9xa^e_cU4u#55%MQU>gvvRyG6y|0;c&tlzQY2iD+YUhaH*~ciWye48jr&|0J zh)5)aXZ-9_#M zZw3kEvPdw-={|7QYC*=t0X;X1p327I$T8V@?c`Zoz5$Y+2>mMbO1~V5v?eD%ccaxf zTEMlCOK65jB@bd)p*f3CJo&Du zKOVY=f-igOq%nc-|pi`r)~-T;kOtDx`Qj?9ZT-ET{`pz1u(0p zYW20Z4Ijr}gL#3}v9vDSq=O(%EGFKXE*s_@pNp1>(83gJYHd=pY>9keIwJ?8lh7Nn z_u}u)<3-Io`T97wj)>{@nBokC>gGwEfT3W_w3^PoWSMJ+raB}-08g!Kp5|}j$VX{F zHG^Cz=^wQE4EazNnt=UXz06T{4#i>O2y(G&owgR#`IR4CVon1qsTcSauUAe~q_<*K zn3B3@b_k^WBvW{Z)zW1|Bl#!Xi9DuejgSe`}la4FQI-E?-*t`VkP&fme=FSJk`)ZDfW`gDXOn|J{MnQr)-lc*pQ z!kM>|musbr-Sz#P)3f^?Hlxj{J|dKvQw%Ri9Q#JTNP7-Hex>AtFyG2K>AuoB;c0c~ z$dQXJ*U4n!c}HU7sw6nEUo2nJY8{yKUyx~0!^uvJ>Qbtl53qvvD3vCr}vWu!?P^-o}L>BcP6CVnC-P2;{>ip!$_q zi1USJfVGVVQ>4ycte5>V!&q#t@=h5fVyZ2LMv2tlN>Dd^Wrhsw*qqDH)LRPY^-WoC z=Zwqs-sKd?nIa75PZnf$WL0@+gx;p*m6yeat=^KwxKeRhz^)UmBpnF`8Vn2bOfS1u zeZN<6+p6DNya!L+)1d~62*+mv-%uCg*$fP&%6eD0r_@pB{U{Xg^5(>5uO~b%BXY?; zIdERc;C>$?jZ@a;2H>)ksl$kv;OXe2Fi976-I;0u=>M&JB`LptDI)7L#I4fh!@{X@ zaB7K{Vd-}A4&!$<*ZvX@wxUfr0J(*?Cb*JN*&>R&yrb?l@LxLz!%~!P-bcn|)sAz> zok{nHS;4v#9cLp5NjGn4-0hDKT>7@MxLHg5jNbG6eUni zV9y_+{7?$`gKb~EtDCr?Dt+%Ng)uO+4V7#Er|JDPQ8GFD%)@mj3UXHmxXR zuBX{%3r^=di{H{{-YX2zpQsf1e+5N*rBE9tW1L~X!Yq-T^}^7T>doNELRZ`ViO076 
zb^M^D7f*em#F^eJZw8(T(q@V1tl6UzNsM#OgE~#@AjhAV>bB^F;2;zv>2jCiYdIJW z-y@)^%KCn3=7f_X6L^>v+`+HLT`2~0%FSA;f4p2tMuBY8TW|5Sp3_GiTE;oba9`z$ zmrGEuskH}8NC~Q|)kEN+VTnWXS+k4*Rk}L0xr}S{FEakL%6(9)J`#B(M53fc`gsC^ z-c!+Za1boMG8Nl4m8&*J(x%8S(FV`qS}=msZ9Ho;)MsDS3^-H~9xpm80Z(aI4e6is*+nNG~Ic_|%QE33&a z;5l_uH6-tvPYbBdJVliibC!cZ%^_C3iGrJJ0wyq0e#lQtj4_ATXhOjb8&1!1k8W6( z4P^rHm=^~y321>oTn{O`xT=SvGYaR{#Y+HVL-KY31V&~9f%d5TQo?G{&*sVWjT*1H zN+a~%E=0{y!+)MUkjiKp)qfY@bp3W>T_V0>c`&JkD}h5zF_<71KPtO)Rh;&NI_2Qg#c134Fdjj-)RZJTXI6h`hs8IeVs4REp{lHkvs(6S7L{{Dh8ofOWZ}Yax2W+5+x~^ z>`%FDH!cC`@gIxiZ;R^fVBU8SEW+n5sHWN%7(3*gg~{e_O(9m1pPxW(laz_kB$EyfvQ7GH z>P;o1OmZOrd9BDSNkcG(>nf2o*z)(?wtjAz4Q)4{j&GAP@!Vfg;Cq~4##;2yE_Z`T z9L)zXNyDFL`0WzHr3+(PSI;VTDx^HrFi_ea$R1sfT#S*@{NpjlweA5i?7=PS7D` zOBTboeNOk>zTI;t=Fb-sQ4v*<`^~IexmITFs;qn}&52k#KMObrxf#n(LzaWA~rKD7r#Fg?lj5?@?Pp*eA}NxuA34{D%~rtJk$MjW%w>v zk$i9;Y=3osWZxSfE*j7Ns!JEo1xf04P(A*_r!!A4JKou2)8u7$fYgknx+T|8eTB4k zn=4a$kXbEO&&Gi6@=JT7T!Wkb(ZA4+rnlCz#_qfWa>jS)e4U#&~g&J}XJ8iKt_`6)b&xOzjz!<|ZIDvsT6F~pR*?1UG&*jiyH=#m~Gt!R;{V})bz zwZgkPTr`F?&Chtoczz2)ECKk8)a;hAMj8=?JJ;&Iu|8q`o{<}D%bI2YA88Syb@w3W zZ8*KXJl)TqA_BTR@yT{VBc!8|*(-n|P1^uJ!~Rk^?1FXVp^ZN}$+ zJ{^ASG6cN2KMpZCe{OtUjs6xt%xg-075MObJm1{z?+`h>oxD!$aP0kj^XT86M99B> z|JVmC@jd5jRh1XG68P|znBA8PbFN%+_y7<9ZndOpIa1v}Zw(#;rZcxcFF*DJt|XJx zue4YD?>tBF>l~HQs(<+kg}^8mIkfBmA1;I?WdK&puKWt};L)&pz z36ixnEn554c~gJpTQOozr(zMzZY^s^LO{5AW(S1{(dW?->v_Zi-Ma}*$-7rtD|pBp zx?W^>t$@UAB>gJHg;+J2@?_huJUky&nu$e7&y?P1(nqH=b`Pm5shC8)B7{M;WJi*= z%T~t0aKa^zDt2)F^dgC{??u7igOd(_B2oEs(A1n@W|h{ZqGm+AR4_Z2UU9#B!6ur2 z84(IkJRuI`)Fg%9Wz{S=uDR}BIGW6GSTpbZ!a&j-0?*+^S-krJb&U#VE%Rz87xVJU zVFi}>Fzz^P5~+VzewQB-E*|UWfk>QD&8?)jq+zEWbo$Js-L2>jxMxZYHIs)A#9>A# zJh7*|V6nl*I?=p7QbC~!5?&}p|ij`(ukToX{+cBUz)hy z_g%hWLaNzX!*9hx~o2+uX-JT!-Yg)zyuUa*&0TH%vS zx$6Z@g0SMKt(*owYAs*X%|Jiwa5`hnU^_4;k9=B`K-~B{CE29k#NX^=Kwxm_yY}~W zUFa_-VOW^as2Z&9k*f7znNM%dS`Z7|+R-gWA3`j3MucEV>>YZJp-)6wN;;v8{f$vI;gGG(uH?%)$NmG%=7}=|c&jv(i1PUtX1ki02cj zvPP+Sb~ae!`f;0tlQH*<&*ricmwf%G+x?)VRINjZYZk6?s6`jYPN?El58%SGB&gd| z#hQQ|_<^V4Iaj%XP_XIsEHi~qGxE@cfEuqybvlzNoYl*v!=dKw?4 zueh+d$?}#Jj3EnO<@=)H4*fz|S&4<-4F^)U3o1v`wK~r*YV0eevJgxv>Ta=5M6Xcl zUc%w=oQdKF^+P{V^S3dlCNu>EXCwGh;G16+~H%7l*Y0MK#YF~d#-%vO-cZV;NfVAisg=#su=SW!fXUt14YrC8(q=i=+Jlvhq`v^%ajN?>h6 z8^9q+$%$a@BuTq{mqhWz+(yc+M-0B^)8d>XE1BNI+ zn*DCPLYdT*Rq;YE*6_<(>{~-!uW-#Pp8fiHb)%gw9UGBpF+qCE5-+}+tf~bzmVS>p z7*n3srZmvCK<-TJ`Ly`}M+PENgyJAxVOx#0D%MYdD?cJyWC-M=Xx?u^@tSe2ka4=S z2t8R}729z-%}5b^pTC{E3sUjWS3@$mEZJ7Pp8ce5@tkxs9Z`1Y(vkufjp9Oo&234r zv$m8nL66&=DGF`p#iW$XA=B^iQurb%n=5iXotq|~`55wuhIZ#JshB&)t12|^wyhSF z88GdKfkGPD`<sZ9}j0xCQNt zx9lmG>$(hbWlKV_*xad+drg|DZJn+IB}7W|_Co2dj^JqtcNfBh5a?-di++VhTJ#!| zn`X4S*J6_-3Z$(VFf$$JF7gx$LRmDjBB8ECWp%BZOt5GBD;^ig+9%$$f%!ACW9mC4 z&UZ<98!kyQ_M9YewMAW?R-$6kEJ*8NR=qreRP&l;Im!h~RW}{?93T8R1?WBe*_9SU z*9)QS$yP$iOS^)S*c%E8D%jG@_(5KTxprYY%Q$VdY6@eP;l_ThBQGtKdupddtEu`7 z{OGSk^t_W9tNmOV3y^S5B(_~x3#qCd7Wz&@ZF}_DbMfn%b?&l2Z*c`bHAi@Jyy5P2EVG-T-MS-8-iu|6I;-h5<`ia%{o;BFkOdkVVbjH{^#ymQ-Pf9vxc1z@%&A zMp>NG%!?cK5<=Oq1idk)aDj68t>=cz{k!LT)VB$%t7R}B;p>{%txE&w<3+G@U%a8_ z=Xv#pcE^bc1H3|Vyp@f~-AwoJ;N{)(aL~c$8FIr4zZ@1e?W}sz4D)mPbO^n1BR3KGLi__8g7Uv ziDK0U0uEziiIjS{K_=v@em5BH?=cSrxdJg}>WLSG%(UVT@?w$e4Nb3J_OHHEEcVT9 z?O9$ZSGN`DCX^jMzABKcs}4Df)T-s)mneB1d2yiDG<>W5S15Vl4qdD@^*h-DyH<{x zzK=xZ?tv$wT|Aon0^6by2Lq}TWFk(x*HxV3%%w=r-NGwW#c9r0kRHC;r(wTty)m0z z)1y+sl@(E0c%P&eb$4z(t)&8R-E*7CtiOFVyIrvT# zX8D)%bl34Lfe4RGY;#C9@v?E3#xpM%AM2t9f_O9>A5hTg(0GS3+3)X)tfyyW&{;0l z;JL65X|K0e@RQYTi&8AiKoVplt;;3z>FWjsX2H4dZa7J=@K#k$YzMaP<`NvkYZAI- 
literal 0
HcmV?d00001

From 8a715fadd587dc89d5728090ba17cbad21a74398 Mon Sep 17 00:00:00 2001
From: Rajeshbabu Chintaguntla
Date: Thu, 29 Feb 2024 14:19:22 +0530
Subject: [PATCH 268/514] =?UTF-8?q?HBASE-27949=20[JDK17]=20Add=20JDK17=20c?=
 =?UTF-8?q?ompilation=20and=20unit=20test=20support=20to=20ni=E2=80=A6=20(?=
 =?UTF-8?q?#5689)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

HBASE-27949 [JDK17] Add JDK17 compilation and unit test support to nightly job
HBASE-26372 [JDK17] Jenkins build support
---
 dev-support/Jenkinsfile        | 119 ++++++++++++++++++++++++++++
 dev-support/Jenkinsfile_GitHub | 140 +++++++++++++++++++++++++++++++++
 dev-support/docker/Dockerfile  |  15 ++++
 3 files changed, 274 insertions(+)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 19256ccb9b12..461df28c8e9d 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -37,6 +37,7 @@ pipeline {
 OUTPUT_DIR_RELATIVE_JDK8_HADOOP2 = 'output-jdk8-hadoop2' OUTPUT_DIR_RELATIVE_JDK8_HADOOP3 =
'output-jdk8-hadoop3' OUTPUT_DIR_RELATIVE_JDK11_HADOOP3 = 'output-jdk11-hadoop3' + OUTPUT_DIR_RELATIVE_JDK17_HADOOP3 = 'output-jdk17-hadoop3' PROJECT = 'hbase' PROJECT_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh' @@ -188,6 +189,7 @@ pipeline { stash name: 'jdk8-hadoop2-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/doesn't-match" stash name: 'jdk8-hadoop3-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK8_HADOOP3}/doesn't-match" stash name: 'jdk11-hadoop3-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK11_HADOOP3}/doesn't-match" + stash name: 'jdk17-hadoop3-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK17_HADOOP3}/doesn't-match" stash name: 'srctarball-result', allowEmpty: true, includes: "output-srctarball/doesn't-match" } } @@ -628,6 +630,123 @@ pipeline { } } } + + stage ('yetus jdk17 hadoop3 checks') { + when { + anyOf { + branch 'master';branch 'branch-3' + } + } + agent { + node { + label 'hbase' + } + } + environment { + BASEDIR = "${env.WORKSPACE}/component" + TESTS = "${env.DEEP_CHECKS}" + OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK17_HADOOP3}" + OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK17_HADOOP3}" + SET_JAVA_HOME = "/usr/lib/jvm/java-17" + // Activates hadoop 3.0 profile in maven runs. + HADOOP_PROFILE = '3.0' + SKIP_ERRORPRONE = true + } + steps { + // Must do prior to anything else, since if one of them timesout we'll stash the commentfile + sh '''#!/usr/bin/env bash + set -e + rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" + echo '(x) {color:red}-1 jdk17 hadoop3 checks{color}' >"${OUTPUT_DIR}/commentfile" + echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile" + ''' + unstash 'yetus' + dir('component') { + checkout scm + } + sh '''#!/usr/bin/env bash + set -e + rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine" + "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" + echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" + ls -lh "${OUTPUT_DIR_RELATIVE}/machine" + ''' + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 jdk17 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 jdk17 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see jdk17 report|${BUILD_URL}JDK17_20Nightly_20Build_20Report_20_28Hadoop3_29/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } + } + post { + always { + stash name: 'jdk17-hadoop3-result', includes: "${OUTPUT_DIR_RELATIVE}/commentfile" + junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true + // zip surefire reports. + sh '''#!/bin/bash -e + if [ -d "${OUTPUT_DIR}/archiver" ]; then + count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l) + if [[ 0 -ne ${count} ]]; then + echo "zipping ${count} archived files" + zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver" + else + echo "No archived files, skipping compressing." 
+ fi + else + echo "No archiver directory, skipping compressing." + fi + ''' + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "${env.OUTPUT_DIR_RELATIVE}/test_logs.zip" + ) + ] + ) + ]) + // remove the big test logs zip file, store the nightlies url in test_logs.html + sh '''#!/bin/bash -e + if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then + echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space" + rm -rf "${OUTPUT_DIR}/test_logs.zip" + python3 ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html" + else + echo "No test_logs.zip, skipping" + fi + ''' + // Has to be relative to WORKSPACE. + archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/*" + archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/**/*" + publishHTML target: [ + allowMissing : true, + keepAll : true, + alwaysLinkToLastBuild: true, + // Has to be relative to WORKSPACE. + reportDir : "${env.OUTPUT_DIR_RELATIVE}", + reportFiles : 'console-report.html', + reportName : 'JDK17 Nightly Build Report (Hadoop3)' + ] + } + } + } + // This is meant to mimic what a release manager will do to create RCs. // See http://hbase.apache.org/book.html#maven.release // TODO (HBASE-23870): replace this with invocation of the release tool diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index bf9169034119..62b16287e1e4 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -55,6 +55,7 @@ pipeline { WORKDIR_REL_GENERAL_CHECK = 'yetus-general-check' WORKDIR_REL_JDK8_HADOOP3_CHECK = 'yetus-jdk8-hadoop3-check' WORKDIR_REL_JDK11_HADOOP3_CHECK = 'yetus-jdk11-hadoop3-check' + WORKDIR_REL_JDK17_HADOOP3_CHECK = 'yetus-jdk17-hadoop3-check' ASF_NIGHTLIES = 'https://nightlies.apache.org' ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" ASF_NIGHTLIES_BASE = "${ASF_NIGHTLIES_BASE_ORI.replaceAll(' ', '%20')}" @@ -470,6 +471,145 @@ pipeline { } } } + stage ('yetus jdk17 hadoop3 checks') { + agent { + node { + label 'hbase' + } + } + environment { + // customized per parallel stage + PLUGINS = "${JDK_SPECIFIC_PLUGINS}" + SET_JAVA_HOME = '/usr/lib/jvm/java-17' + WORKDIR_REL = "${WORKDIR_REL_JDK17_HADOOP3_CHECK}" + // identical for all parallel stages + WORKDIR = "${WORKSPACE}/${WORKDIR_REL}" + YETUSDIR = "${WORKDIR}/${YETUS_REL}" + SOURCEDIR = "${WORKDIR}/${SRC_REL}" + PATCHDIR = "${WORKDIR}/${PATCH_REL}" + BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" + DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" + YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + SKIP_ERRORPRONE = true + } + when { + allOf { + // this will return true if the pipeline is building a change request, such as a GitHub pull request. 
+ changeRequest() + expression { env.CHANGE_TARGET in ['master', 'branch-3'] } + } + } + steps { + dir("${SOURCEDIR}") { + checkout scm + } + dir("${YETUSDIR}") { + checkout([ + $class : 'GitSCM', + branches : [[name: "${YETUS_VERSION}"]], + userRemoteConfigs: [[url: 'https://github.com/apache/yetus.git']]] + ) + } + dir("${WORKDIR}") { + withCredentials([ + usernamePassword( + credentialsId: 'apache-hbase-at-github.com', + passwordVariable: 'GITHUB_PASSWORD', + usernameVariable: 'GITHUB_USER' + )]) { + script { + def ret = sh( + label: 'test-patch', + returnStatus: true, + script: '''#!/bin/bash -e + hostname -a ; pwd ; ls -la + printenv 2>&1 | sort + echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" + "${YETUS_DRIVER}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } + } + } + } + post { + always { + junit testResults: "${WORKDIR_REL}/${SRC_REL}/**/target/**/TEST-*.xml", + allowEmptyResults: true, skipPublishingChecks: true + sh label: 'zip surefire reports', script: '''#!/bin/bash -e + if [ -d "${PATCHDIR}/archiver" ]; then + count=$(find "${PATCHDIR}/archiver" -type f | wc -l) + if [[ 0 -ne ${count} ]]; then + echo "zipping ${count} archived files" + zip -q -m -r "${PATCHDIR}/test_logs.zip" "${PATCHDIR}/archiver" + else + echo "No archived files, skipping compressing." + fi + else + echo "No archiver directory, skipping compressing." + fi + ''' + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "${env.WORKDIR_REL}/${env.PATCH_REL}/test_logs.zip" + ) + ] + ) + ]) + // remove the big test logs zip file, store the nightlies url in test_logs.txt + sh '''#!/bin/bash -e + if [ -f "${PATCHDIR}/test_logs.zip" ]; then + echo "Remove ${PATCHDIR}/test_logs.zip for saving space" + rm -rf "${PATCHDIR}/test_logs.zip" + python3 ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" > "${PATCHDIR}/test_logs.html" + else + echo "No test_logs.zip, skipping" + fi + ''' + // Has to be relative to WORKSPACE. 
+ archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit" + archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/**/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit/**/*" + publishHTML target: [ + allowMissing: true, + keepAll: true, + alwaysLinkToLastBuild: true, + // Has to be relative to WORKSPACE + reportDir: "${WORKDIR_REL}/${PATCH_REL}", + reportFiles: 'report.html', + reportName: 'PR JDK17 Hadoop3 Check Report' + ] + } + // Jenkins pipeline jobs fill slaves on PRs without this :( + cleanup() { + script { + sh label: 'Cleanup workspace', script: '''#!/bin/bash -e + # See YETUS-764 + if [ -f "${PATCHDIR}/pidfile.txt" ]; then + echo "test-patch process appears to still be running: killing" + kill `cat "${PATCHDIR}/pidfile.txt"` || true + sleep 10 + fi + if [ -f "${PATCHDIR}/cidfile.txt" ]; then + echo "test-patch container appears to still be running: killing" + docker kill `cat "${PATCHDIR}/cidfile.txt"` || true + fi + # See HADOOP-13951 + chmod -R u+rxw "${WORKSPACE}" + ''' + dir ("${WORKDIR}") { + deleteDir() + } + } + } + } + } } } } diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 1f841d1d8672..dcd84c89c218 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -110,6 +110,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk11.tar.gz "${OPENJDK11_URL}" && \ echo "${OPENJDK11_SHA256} */tmp/adoptopenjdk11.tar.gz" | sha256sum -c - +FROM base_image AS openjdk17_download_image +ENV OPENJDK17_URL 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.10%2B7/OpenJDK17U-jdk_x64_linux_hotspot_17.0.10_7.tar.gz' +ENV OPENJDK17_SHA256 'a8fd07e1e97352e97e330beb20f1c6b351ba064ca7878e974c7d68b8a5c1b378' +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk17.tar.gz "${OPENJDK17_URL}" && \ + echo "${OPENJDK17_SHA256} */tmp/adoptopenjdk17.tar.gz" | sha256sum -c - + ## # build the final image # @@ -158,6 +165,14 @@ RUN mkdir -p /usr/lib/jvm && \ ln -s /usr/lib/jvm/java-11-adoptopenjdk /usr/lib/jvm/java-11 && \ rm /tmp/adoptopenjdk11.tar.gz +# hadolint ignore=DL3010 +COPY --from=openjdk17_download_image /tmp/adoptopenjdk17.tar.gz /tmp/adoptopenjdk17.tar.gz +RUN mkdir -p /usr/lib/jvm && \ + tar xzf /tmp/adoptopenjdk17.tar.gz -C /usr/lib/jvm && \ + ln -s "/usr/lib/jvm/$(basename "$(tar -tf /tmp/adoptopenjdk17.tar.gz | head -n1)")" /usr/lib/jvm/java-17-adoptopenjdk && \ + ln -s /usr/lib/jvm/java-17-adoptopenjdk /usr/lib/jvm/java-17 && \ + rm /tmp/adoptopenjdk17.tar.gz + # configure default environment for Yetus. Yetus in dockermode seems to require # these values to be specified here; the various --foo-path flags do not # propigate as expected, while these are honored. 
From 5cb87ef0209551b1cdb584c0654bd029323a0cb8 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 29 Feb 2024 10:12:43 +0100 Subject: [PATCH 269/514] HBASE-28342 changes to DecommissionedHostRejectedException interface (addendum) (#5717) Signed-off-by: Nihal Jain --- .../DecommissionedHostRejectedException.java | 4 ++-- .../org/apache/hadoop/hbase/master/ServerManager.java | 1 + .../apache/hadoop/hbase/regionserver/HRegionServer.java | 2 +- .../hbase/regionserver/TestRegionServerReportForDuty.java | 8 ++++---- 4 files changed, 8 insertions(+), 7 deletions(-) rename hbase-server/src/main/java/org/apache/hadoop/hbase/{master => ipc}/DecommissionedHostRejectedException.java (94%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DecommissionedHostRejectedException.java similarity index 94% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DecommissionedHostRejectedException.java index 3d28b1e75be8..f96e2f2afde0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DecommissionedHostRejectedException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/DecommissionedHostRejectedException.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.master; +package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.yetus.audience.InterfaceAudience; -@InterfaceAudience.Private +@InterfaceAudience.Public public class DecommissionedHostRejectedException extends HBaseIOException { public DecommissionedHostRejectedException(String message) { super(message); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index a2ed4da53e39..3217b6dfcc92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index c71859ee6c1e..88863c06e4bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -115,12 +115,12 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; 
import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.master.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore; import org.apache.hadoop.hbase.monitoring.TaskMonitor; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java index b408229f59fa..ff8bdab5848a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.MatcherPredicate; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer; +import org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.DecommissionedHostRejectedException; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -293,9 +293,9 @@ public void testReportForDutyGetsRejectedByMasterWhenConfiguredToRejectDecommiss /** * Assert that the following log message occurred (one line): - * "org.apache.hadoop.hbase.master.DecommissionedHostRejectedException: - * org.apache.hadoop.hbase.master.DecommissionedHostRejectedException: Host localhost exists in - * the list of decommissioned servers and Master is configured to reject decommissioned hosts" + * "org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: + * org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host localhost exists in the + * list of decommissioned servers and Master is configured to reject decommissioned hosts" */ assertThat(Arrays.asList(capturer.getOutput().split("\n")), hasItem(allOf(containsString(DecommissionedHostRejectedException.class.getSimpleName()), From 937da9bcf3ae15494585f16e93ccaf63f4fc1295 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 29 Feb 2024 10:53:05 +0100 Subject: [PATCH 270/514] HBASE-28404 Use "set -x" when running release script in debug mode (#5715) --- dev-support/create-release/do-release-docker.sh | 5 +++++ dev-support/create-release/do-release.sh | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh index 804661945e09..4dcf0b2f1e26 100755 --- a/dev-support/create-release/do-release-docker.sh +++ b/dev-support/create-release/do-release-docker.sh @@ -102,6 +102,10 @@ shift $((OPTIND-1)) if (( $# > 0 )); then error "Arguments can only be provided with option flags, invalid args: $*" fi + +if [ "$DEBUG" = "1" ]; then + set -x +fi export DEBUG if [ -z "$WORKDIR" ] || [ ! -d "$WORKDIR" ]; then @@ -221,6 +225,7 @@ ASF_PASSWORD=$ASF_PASSWORD RELEASE_STEP=$RELEASE_STEP API_DIFF_TAG=$API_DIFF_TAG HOST_OS=$HOST_OS +DEBUG=$DEBUG EOF JAVA_MOUNT=() diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index 904d813fc3c6..6156a217ae34 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -17,6 +17,11 @@ # limitations under the License. 
# +# Turn on Bash command logging for debug mode +if [ "$DEBUG" = "1" ]; then + set -x +fi + # Make a tmp dir into which we put files cleaned-up on exit. TMPDIR=$(mktemp -d) trap "rm -rf $TMPDIR" EXIT From fed3fdd89ebf3a10fe49f90a3cbebfb1c48c8aea Mon Sep 17 00:00:00 2001 From: Rajeshbabu Chintaguntla Date: Thu, 29 Feb 2024 15:23:33 +0530 Subject: [PATCH 271/514] HBASE-28350 [JDK17] Unable to run hbase-it tests with JDK 17 (#5712) --- hbase-it/pom.xml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index dbf34c404155..96033b151299 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -37,6 +37,8 @@ **/IntegrationTest*.java 4g + -XX:+CMSClassUnloadingEnabled + ${failsafe.jdk8.flags} @@ -283,7 +285,7 @@ I believe it is a failsafe bug, we may consider using surefire --> 1800 -enableassertions -Xmx${failsafe.Xmx} - -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled + -Djava.security.egd=file:/dev/./urandom ${failsafe.profile.overrides} -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal @{jacocoArgLine} @@ -336,6 +338,10 @@ [1.11,) + + ${hbase-surefire.jdk11.flags} + + com.sun.xml.ws From 1e56034b58d74012ac9aeb4f5af2711314ea9b53 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 29 Feb 2024 12:06:46 +0100 Subject: [PATCH 272/514] HBASE-28403 Improve debugging for failures in procedure tests (#5709) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We see unit test failures in Jenkins that look like this: ``` java.lang.IllegalArgumentException: run queue not empty at org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument(Preconditions.java:143) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.load(ProcedureExecutor.java:332) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.init(ProcedureExecutor.java:665) at org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.restart(ProcedureTestingUtility.java:132) at org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.restart(ProcedureTestingUtility.java:100) at org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.restartMasterProcedureExecutor(MasterProcedureTestingUtility.java:85) at org.apache.hadoop.hbase.master.assignment.TestRollbackSCP.testFailAndRollback(TestRollbackSCP.java:180) ``` This isn't enough information to debug the situation. The test code in question looks reasonable enough – it clears the object for re-use between tests. However, somewhere between stop/clear/start we miss something. Add some toString implementations and dump the objects in the preconditions. 
Signed-off-by: Duo Zhang --- .../AbstractProcedureScheduler.java | 8 +++++ .../hadoop/hbase/procedure2/LockAndQueue.java | 11 ++++--- .../hadoop/hbase/procedure2/LockStatus.java | 12 +++++++ .../hbase/procedure2/ProcedureExecutor.java | 9 +++--- .../procedure2/SimpleProcedureScheduler.java | 8 +++++ .../hbase/master/procedure/GlobalQueue.java | 8 +++++ .../procedure/MasterProcedureScheduler.java | 28 ++++++++++++++++ .../hbase/master/procedure/MetaQueue.java | 8 +++++ .../hbase/master/procedure/PeerQueue.java | 8 +++++ .../hadoop/hbase/master/procedure/Queue.java | 9 +++--- .../hbase/master/procedure/SchemaLocking.java | 32 ++++++++----------- .../hbase/master/procedure/ServerQueue.java | 8 +++++ .../hbase/master/procedure/TableQueue.java | 8 +++++ 13 files changed, 125 insertions(+), 32 deletions(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 61f73544b1bb..04b506594774 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -21,6 +21,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -293,4 +295,10 @@ protected void wakePollIfNeeded(final int waitingCount) { schedWaitCond.signalAll(); } } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("running", running) + .build(); + } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index fa6c72c490fc..bcf3982de7f0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -20,6 +20,8 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Stream; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.yetus.audience.InterfaceAudience; /** @@ -36,8 +38,9 @@ * NOT thread-safe. Needs external concurrency control: e.g. uses in MasterProcedureScheduler are * guarded by schedLock().
* There is no need of 'volatile' keyword for member variables because of memory synchronization - * guarantees of locks (see 'Memory Synchronization', - * http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html)
+ * guarantees of locks (see + *
Memory + * Synchronization)
* We do not implement Lock interface because we need exclusive and shared locking, and also because * try-lock functions require procedure id.
* We do not use ReentrantReadWriteLock directly because of its high memory overhead. @@ -182,7 +185,7 @@ public Stream filterWaitingQueue(Predicate predicate) { @Override public String toString() { - return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") - + ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .appendSuper(describeLockStatus()).append("waitingProcCount", queue.size()).build(); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java index d3723e1a35a7..61b302b0c8b1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.procedure2; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.yetus.audience.InterfaceAudience; /** @@ -68,4 +70,14 @@ default long getExclusiveLockProcIdOwner() { * Get the number of procedures which hold the shared lock. */ int getSharedLockCount(); + + default String describeLockStatus() { + ToStringBuilder builder = new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("exclusiveLock", hasExclusiveLock()); + if (hasExclusiveLock()) { + builder.append("exclusiveLockProcIdOwner", getExclusiveLockProcIdOwner()); + } + builder.append("sharedLockCount", getSharedLockCount()); + return builder.build(); + } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index e01a27d74675..8a5062be7918 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -326,10 +326,11 @@ public ProcedureExecutor(final Configuration conf, final TEnvironment environmen } private void load(final boolean abortOnCorruption) throws IOException { - Preconditions.checkArgument(completed.isEmpty(), "completed not empty"); - Preconditions.checkArgument(rollbackStack.isEmpty(), "rollback state not empty"); - Preconditions.checkArgument(procedures.isEmpty(), "procedure map not empty"); - Preconditions.checkArgument(scheduler.size() == 0, "run queue not empty"); + Preconditions.checkArgument(completed.isEmpty(), "completed not empty: %s", completed); + Preconditions.checkArgument(rollbackStack.isEmpty(), "rollback state not empty: %s", + rollbackStack); + Preconditions.checkArgument(procedures.isEmpty(), "procedure map not empty: %s", procedures); + Preconditions.checkArgument(scheduler.size() == 0, "scheduler queue not empty: %s", scheduler); store.load(new ProcedureStore.ProcedureLoader() { @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java index f2b4d4820da7..8998d3bebb2f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java @@ -19,6 +19,8 @@ import java.util.Collections; 
import java.util.List; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -82,4 +84,10 @@ public List getLocks() { public LockedResource getLockResource(LockedResourceType resourceType, String resourceName) { return null; } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .append("runnables", runnables).build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/GlobalQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/GlobalQueue.java index 1633dc4856e7..541bbccc6d21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/GlobalQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/GlobalQueue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; @@ -32,4 +34,10 @@ public GlobalQueue(String globalId, LockStatus lockStatus) { boolean requireExclusiveLock(Procedure proc) { return true; } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index fbf0eb8abf32..a5ef7c5d9239 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.function.Function; import java.util.function.Supplier; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; @@ -1129,4 +1131,30 @@ public String dumpLocks() throws IOException { schedUnlock(); } } + + private void serverBucketToString(ToStringBuilder builder, String queueName, Queue queue) { + int size = queueSize(queue); + if (size != 0) { + builder.append(queueName, queue); + } + } + + @Override + public String toString() { + ToStringBuilder builder = + new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()); + schedLock(); + try { + for (int i = 0; i < serverBuckets.length; i++) { + serverBucketToString(builder, "serverBuckets[" + i + "]", serverBuckets[i]); + } + builder.append("tableMap", tableMap); + builder.append("peerMap", peerMap); + builder.append("metaMap", metaMap); + builder.append("globalMap", globalMap); + } finally { + schedUnlock(); + } + return builder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java index 3eea59ef23be..3d313c9ac3ab 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -38,4 +40,10 @@ protected MetaQueue(LockStatus lockStatus) { boolean requireExclusiveLock(Procedure proc) { return true; } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerQueue.java index 3d07953c6f1e..7a2d917eb918 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerQueue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -42,4 +44,10 @@ private static boolean requirePeerExclusiveLock(PeerProcedureInterface proc) { && proc.getPeerOperationType() != PeerOperationType.SYNC_REPLICATION_REPLAY_WAL && proc.getPeerOperationType() != PeerOperationType.SYNC_REPLICATION_REPLAY_WAL_REMOTE; } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/Queue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/Queue.java index 621c7ea69569..3ba79f8c41f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/Queue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/Queue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureDeque; @@ -105,10 +107,7 @@ public int compareTo(Queue other) { @Override public String toString() { - return String.format("%s(%s, xlock=%s sharedLock=%s size=%s)", getClass().getSimpleName(), key, - lockStatus.hasExclusiveLock() - ? 
"true (" + lockStatus.getExclusiveLockProcIdOwner() + ")" - : "false", - lockStatus.getSharedLockCount(), size()); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("key", key) + .append("lockStatus", lockStatus.describeLockStatus()).append("size", size()).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java index 853d13b0c93b..642df36d535f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java @@ -22,6 +22,9 @@ import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.locking.LockProcedure; @@ -227,27 +230,18 @@ void clear() { @Override public String toString() { - return "serverLocks=" + filterUnlocked(this.serverLocks) + ", namespaceLocks=" - + filterUnlocked(this.namespaceLocks) + ", tableLocks=" + filterUnlocked(this.tableLocks) - + ", regionLocks=" + filterUnlocked(this.regionLocks) + ", peerLocks=" - + filterUnlocked(this.peerLocks) + ", metaLocks=" - + filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock)) + ", globalLocks=" - + filterUnlocked(globalLocks); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("serverLocks", filterUnlocked(serverLocks)) + .append("namespaceLocks", filterUnlocked(namespaceLocks)) + .append("tableLocks", filterUnlocked(tableLocks)) + .append("regionLocks", filterUnlocked(regionLocks)) + .append("peerLocks", filterUnlocked(peerLocks)) + .append("metaLocks", filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock))) + .append("globalLocks", filterUnlocked(globalLocks)).build(); } private String filterUnlocked(Map locks) { - StringBuilder sb = new StringBuilder("{"); - int initialLength = sb.length(); - for (Map.Entry entry : locks.entrySet()) { - if (!entry.getValue().isLocked()) { - continue; - } - if (sb.length() > initialLength) { - sb.append(", "); - } - sb.append("{").append(entry.getKey()).append("=").append(entry.getValue()).append("}"); - } - sb.append("}"); - return sb.toString(); + return locks.entrySet().stream().filter(val -> !val.getValue().isLocked()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java index 9ed6cfacd860..a3144cc157a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -47,4 +49,10 @@ public boolean requireExclusiveLock(Procedure proc) { } throw new UnsupportedOperationException("unexpected type 
" + spi.getServerOperationType()); } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index 2f0cec77e18c..36c9df6e794e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -76,4 +78,10 @@ private static boolean requireTableExclusiveLock(TableProcedureInterface proc) { } throw new UnsupportedOperationException("unexpected type " + proc.getTableOperationType()); } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).appendSuper(super.toString()) + .append("namespaceLockStatus", namespaceLockStatus.describeLockStatus()).build(); + } } From 1c41e861f8f54206ad98ee9b8ab943d55210c65f Mon Sep 17 00:00:00 2001 From: Ahmad Alhour Date: Thu, 29 Feb 2024 14:55:25 +0100 Subject: [PATCH 273/514] HBASE-28354 RegionSizeCalculator throws NPE when regions are in transition (#5699) When a region is in transition, it may briefly have a null ServerName in meta. The RegionSizeCalculator calls RegionLocator.getAllRegionLocations() and does not handle the possibility that a RegionLocation.getServerName() could be null. The ServerName is eventually passed into an Admin call, which results in an NPE. This has come up in other contexts. For example, taking a look at getAllRegionLocations() impl, we have checks to ensure that we don't call null server names. We need to similarly handle the possibility of nulls in RegionSizeCalculator. Signed-off-by: Nick Dimiduk Signed-off-by: Hui Ruan --- .../hbase/mapreduce/RegionSizeCalculator.java | 15 +++++---- .../mapreduce/TestRegionSizeCalculator.java | 31 +++++++++++++------ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index 4d027196a8fe..cc36ef5deb48 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -21,8 +21,10 @@ import java.util.Arrays; import java.util.Collections; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionMetrics; @@ -35,8 +37,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - /** * Computes size of each region for given table and given column families. The value is used by * MapReduce for better scheduling. 
@@ -96,12 +96,11 @@ private void init(RegionLocator regionLocator, Admin admin) throws IOException { } private Set getRegionServersOfTable(RegionLocator regionLocator) throws IOException { - - Set tableServers = Sets.newHashSet(); - for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) { - tableServers.add(regionLocation.getServerName()); - } - return tableServers; + // The region locations could contain `null` ServerName instances if the region is currently + // in transition, we filter those out for now, which impacts the size calculation for these + // regions temporarily until the ServerName gets filled in later + return regionLocator.getAllRegionLocations().stream().map(HRegionLocation::getServerName) + .filter(Objects::nonNull).collect(Collectors.toSet()); } boolean enabled(Configuration configuration) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index f841bdbb61dc..2fda536438a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -23,6 +23,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -67,8 +69,9 @@ public void testSimpleTestCase() throws Exception { assertEquals(123 * megabyte, calculator.getRegionSize(Bytes.toBytes("region1"))); assertEquals(54321 * megabyte, calculator.getRegionSize(Bytes.toBytes("region2"))); assertEquals(1232 * megabyte, calculator.getRegionSize(Bytes.toBytes("region3"))); + // if regionCalculator does not know about a region, it should return 0 - assertEquals(0 * megabyte, calculator.getRegionSize(Bytes.toBytes("otherTableRegion"))); + assertEquals(0, calculator.getRegionSize(Bytes.toBytes("otherTableRegion"))); assertEquals(3, calculator.getRegionSizeMap().size()); } @@ -105,24 +108,37 @@ public void testDisabled() throws Exception { // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin); - assertEquals(0 * megabyte, disabledCalculator.getRegionSize(Bytes.toBytes(regionName))); - + assertEquals(0, disabledCalculator.getRegionSize(Bytes.toBytes(regionName))); assertEquals(0, disabledCalculator.getRegionSizeMap().size()); } + @Test + public void testRegionWithNullServerName() throws Exception { + RegionLocator regionLocator = + mockRegionLocator(null, Collections.singletonList("someBigRegion")); + Admin admin = mockAdmin(mockRegion("someBigRegion", Integer.MAX_VALUE)); + RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); + assertEquals(0, calculator.getRegionSize(Bytes.toBytes("someBigRegion"))); + } + /** * Makes some table with given region names. */ private RegionLocator mockRegionLocator(String... 
regionNames) throws IOException { + return mockRegionLocator(sn, Arrays.asList(regionNames)); + } + + private RegionLocator mockRegionLocator(ServerName serverName, List regionNames) + throws IOException { RegionLocator mockedTable = Mockito.mock(RegionLocator.class); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable")); - List regionLocations = new ArrayList<>(regionNames.length); + List regionLocations = new ArrayList<>(regionNames.size()); when(mockedTable.getAllRegionLocations()).thenReturn(regionLocations); for (String regionName : regionNames) { RegionInfo info = Mockito.mock(RegionInfo.class); when(info.getRegionName()).thenReturn(Bytes.toBytes(regionName)); - regionLocations.add(new HRegionLocation(info, sn)); + regionLocations.add(new HRegionLocation(info, serverName)); } return mockedTable; @@ -133,10 +149,7 @@ private RegionLocator mockRegionLocator(String... regionNames) throws IOExceptio */ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); - List regionLoads = new ArrayList<>(); - for (RegionMetrics regionLoad : regionLoadArray) { - regionLoads.add(regionLoad); - } + List regionLoads = new ArrayList<>(Arrays.asList(regionLoadArray)); when(mockAdmin.getConfiguration()).thenReturn(configuration); when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable"))) .thenReturn(regionLoads); From acaf76a45aae5ed7b2f5463f93a0125e846f12b5 Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Fri, 1 Mar 2024 13:06:19 +0100 Subject: [PATCH 274/514] HBASE-28408 Rephrase confusing log message (#5718) Assume a user has a series of backups: Full1, Inc2, Inc3, where a table has not changed between Full1 and Inc3, but has changed after Inc3. When restoring that table to Inc3, a log warning was outputted mentioning there was no need for a restore. This message in fact means there is no need for the incremental restore portion of the restore process. 
Signed-off-by: Nihal Jain (cherry picked from commit 102c1b684a893dd3475eb2c047414cbe395f9b) --- .../apache/hadoop/hbase/backup/impl/RestoreTablesClient.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java index 05685c8e091e..654fe343e27d 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -173,7 +173,8 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa } if (dirList.isEmpty()) { - LOG.warn("Nothing has changed, so there is no need to restore '" + sTable + "'"); + LOG.info("No incremental changes since full backup for '" + sTable + + "', skipping incremental restore step."); return; } From ede4ccd2dc5f5690a518f97b6d28a7e18df9584a Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Fri, 1 Mar 2024 18:46:58 +0530 Subject: [PATCH 275/514] HBASE-20693 Refactor thrift jsp's and extract header and footer (#5732) - Fixes the way logLevel page renders in UI Signed-off-by: Nick Dimiduk --- .../resources/hbase-webapps/thrift/footer.jsp | 30 +++++++ .../resources/hbase-webapps/thrift/header.jsp | 74 ++++++++++++++++ .../resources/hbase-webapps/thrift/thrift.jsp | 85 ++++--------------- 3 files changed, 121 insertions(+), 68 deletions(-) create mode 100644 hbase-thrift/src/main/resources/hbase-webapps/thrift/footer.jsp create mode 100644 hbase-thrift/src/main/resources/hbase-webapps/thrift/header.jsp diff --git a/hbase-thrift/src/main/resources/hbase-webapps/thrift/footer.jsp b/hbase-thrift/src/main/resources/hbase-webapps/thrift/footer.jsp new file mode 100644 index 000000000000..53a7d0cdbdbb --- /dev/null +++ b/hbase-thrift/src/main/resources/hbase-webapps/thrift/footer.jsp @@ -0,0 +1,30 @@ +<%-- +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +--%> + + + + + + diff --git a/hbase-thrift/src/main/resources/hbase-webapps/thrift/header.jsp b/hbase-thrift/src/main/resources/hbase-webapps/thrift/header.jsp new file mode 100644 index 000000000000..f43872c11af6 --- /dev/null +++ b/hbase-thrift/src/main/resources/hbase-webapps/thrift/header.jsp @@ -0,0 +1,74 @@ +<%-- +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.hbase.HBaseConfiguration"%> + + + + + + + <%= request.getParameter("pageTitle")%> + + + + + + + + + +

diff --git a/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp b/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp index 6b308fc2a2dc..d148df1f2e29 100644 --- a/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp +++ b/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp @@ -19,7 +19,6 @@ --%> <%@ page contentType="text/html;charset=UTF-8" import="org.apache.hadoop.conf.Configuration" - import="org.apache.hadoop.hbase.HBaseConfiguration" import="org.apache.hadoop.hbase.util.VersionInfo" import="java.util.Date" %> @@ -27,72 +26,25 @@ <%@ page import="org.apache.hadoop.hbase.util.JvmVersion" %> <% -Configuration conf = (Configuration)getServletContext().getAttribute("hbase.conf"); -String serverType = (String)getServletContext().getAttribute("hbase.thrift.server.type"); -long startcode = conf.getLong("startcode", System.currentTimeMillis()); -String listenPort = conf.get("hbase.regionserver.thrift.port", "9090"); -ImplType implType = ImplType.getServerImpl(conf); + Configuration conf = (Configuration)getServletContext().getAttribute("hbase.conf"); + String serverType = (String)getServletContext().getAttribute("hbase.thrift.server.type"); + long startcode = conf.getLong("startcode", System.currentTimeMillis()); + String listenPort = conf.get("hbase.regionserver.thrift.port", "9090"); + ImplType implType = ImplType.getServerImpl(conf); -String transport = - (implType.isAlwaysFramed() || - conf.getBoolean("hbase.regionserver.thrift.framed", false)) ? "Framed" : "Standard"; -String protocol = - conf.getBoolean("hbase.regionserver.thrift.compact", false) ? "Compact" : "Binary"; -String qop = conf.get("hbase.thrift.security.qop", "None"); + String transport = + (implType.isAlwaysFramed() || + conf.getBoolean("hbase.regionserver.thrift.framed", false)) ? "Framed" : "Standard"; + String protocol = + conf.getBoolean("hbase.regionserver.thrift.compact", false) ? "Compact" : "Binary"; + String qop = conf.get("hbase.thrift.security.qop", "None"); + pageContext.setAttribute("pageTitle", "HBase Thrift Server: " + listenPort); %> - - - - - - HBase Thrift Server: <%= listenPort %> - - - - - - - - - + + +
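The JSP markup added by this refactor does not survive in the flattened hunks above (only bare "+" runs remain). As an illustrative sketch only, and not the patch's exact markup: a page that reuses the shared fragments sets the title and includes them roughly as below, assuming a jsp:param hand-off that matches the request.getParameter("pageTitle") call visible in header.jsp.

    <%-- Illustrative JSP only; names are taken from the visible parts of the patch. --%>
    <% pageContext.setAttribute("pageTitle", "HBase Thrift Server: " + listenPort); %>
    <jsp:include page="header.jsp">
      <jsp:param name="pageTitle" value="${pageTitle}"/>
    </jsp:include>
    <%-- page body --%>
    <jsp:include page="footer.jsp"/>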
@@ -165,8 +117,5 @@ String qop = conf.get("hbase.thrift.security.qop", "None");
- - - - - + + From 4f97ece9f5ab9288ea44f5842be55a4dbaa866e0 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 5 Mar 2024 09:01:22 +0100 Subject: [PATCH 276/514] HBASE-28379 Upgrade thirdparty dep to 4.1.6 (#5693) Signed-off-by: Nihal Jain Signed-off-by: Pankaj Kumar < pankajkumar@apache.org> --- hbase-examples/pom.xml | 2 +- hbase-protocol-shaded/pom.xml | 2 +- hbase-shaded/pom.xml | 2 +- pom.xml | 16 +++++++++++++--- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 5a7d8f957da0..731d9ae1df64 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -33,7 +33,7 @@ - 3.24.3 + 3.25.2 diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index b2d9f79bbd08..39ecccc0015d 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -34,7 +34,7 @@ - 3.24.3 + 3.25.2 + 2.16.1 + 2.16.1 2.3.1 3.1.0 2.1.1 @@ -909,7 +913,13 @@ 1.1.10.4 1.9 1.5.5-2 - 4.1.5 + + 4.1.6 0.8.8 From 2306820df8b41d9af5227465ee2cf9e18b8f0b5c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 6 Mar 2024 16:08:36 +0800 Subject: [PATCH 277/514] HBASE-28417 TestBlockingIPC.testBadPreambleHeader sometimes fails with broken pipe instead of bad auth (#5740) Also change the IPC related tests to test different combinations of rpc server&client, for example, NettyRpcClient and SimpleRpcServer Signed-off-by: Nick Dimiduk Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/ipc/NettyRpcConnection.java | 2 +- .../hadoop/hbase/ipc/AbstractTestIPC.java | 86 ++++++++++++++++--- .../hadoop/hbase/ipc/TestBlockingIPC.java | 55 ++---------- .../apache/hadoop/hbase/ipc/TestNettyIPC.java | 40 ++++----- .../hadoop/hbase/ipc/TestNettyTlsIPC.java | 24 +++--- 5 files changed, 112 insertions(+), 95 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 1618709fa9bf..85f7c0a3e61a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -290,7 +290,7 @@ public void operationComplete(Future future) throws Exception { }); } - private void getConnectionRegistry(Channel ch, Call connectionRegistryCall) throws IOException { + private void getConnectionRegistry(Channel ch, Call connectionRegistryCall) { assert eventLoop.inEventLoop(); PreambleCallHandler.setup(ch.pipeline(), rpcClient.readTO, this, RpcClient.REGISTRY_PREAMBLE_HEADER, connectionRegistryCall); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index e4427c1690c3..0f0c22baf9fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -56,6 +56,7 @@ import io.opentelemetry.sdk.trace.data.SpanData; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.channels.SocketChannel; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -73,6 +74,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; 
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -80,8 +82,10 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.hamcrest.Matcher; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; +import org.junit.runners.Parameterized.Parameter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,14 +118,12 @@ public abstract class AbstractTestIPC { private static final KeyValue CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, CELL_BYTES); protected static final Configuration CONF = HBaseConfiguration.create(); - static { - // Set the default to be the old SimpleRpcServer. Subclasses test it and netty. - CONF.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, SimpleRpcServer.class.getName()); - } - protected abstract RpcServer createRpcServer(Server server, String name, + protected RpcServer createRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler) throws IOException; + RpcScheduler scheduler) throws IOException { + return RpcServerFactory.createRpcServer(server, name, services, bindAddress, conf, scheduler); + } private RpcServer createRpcServer(String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { @@ -133,6 +135,14 @@ private RpcServer createRpcServer(String name, List @Rule public OpenTelemetryRule traceRule = OpenTelemetryRule.create(); + @Parameter(0) + public Class rpcServerImpl; + + @Before + public void setUpBeforeTest() { + CONF.setClass(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl, RpcServer.class); + } + /** * Ensure we do not HAVE TO HAVE a codec. */ @@ -348,9 +358,43 @@ public void testTimeout() throws IOException { } } - protected abstract RpcServer createTestFailingRpcServer(final String name, + private static class FailingSimpleRpcServer extends SimpleRpcServer { + + FailingSimpleRpcServer(Server server, String name, + List services, InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler) throws IOException { + super(server, name, services, bindAddress, conf, scheduler, true); + } + + final class FailingConnection extends SimpleServerRpcConnection { + private FailingConnection(FailingSimpleRpcServer rpcServer, SocketChannel channel, + long lastContact) { + super(rpcServer, channel, lastContact); + } + + @Override + public void processRequest(ByteBuff buf) throws IOException, InterruptedException { + // this will throw exception after the connection header is read, and an RPC is sent + // from client + throw new DoNotRetryIOException("Failing for test"); + } + } + + @Override + protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) { + return new FailingConnection(this, channel, time); + } + } + + protected RpcServer createTestFailingRpcServer(final String name, final List services, final InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException; + Configuration conf, RpcScheduler scheduler) throws IOException { + if (rpcServerImpl.equals(NettyRpcServer.class)) { + return new FailingNettyRpcServer(null, name, services, bindAddress, conf, scheduler); + } else { + return new FailingSimpleRpcServer(null, name, services, bindAddress, conf, scheduler); + } + } /** Tests that the connection closing is handled by the client with outstanding RPC calls */ @Test @@ -570,19 +614,33 @@ public void testTracingErrorIpc() throws IOException { 
protected abstract AbstractRpcClient createBadAuthRpcClient(Configuration conf); + private IOException doBadPreableHeaderCall(BlockingInterface stub) { + ServiceException se = assertThrows(ServiceException.class, + () -> stub.echo(null, EchoRequestProto.newBuilder().setMessage("hello").build())); + return ProtobufUtil.handleRemoteException(se); + } + @Test - public void testBadPreambleHeader() throws IOException, ServiceException { + public void testBadPreambleHeader() throws Exception { Configuration clientConf = new Configuration(CONF); RpcServer rpcServer = createRpcServer("testRpcServer", Collections.emptyList(), new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createBadAuthRpcClient(clientConf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); - ServiceException se = assertThrows(ServiceException.class, - () -> stub.echo(null, EchoRequestProto.newBuilder().setMessage("hello").build())); - IOException ioe = ProtobufUtil.handleRemoteException(se); - assertThat(ioe, instanceOf(BadAuthException.class)); - assertThat(ioe.getMessage(), containsString("authName=unknown")); + BadAuthException error = null; + // for SimpleRpcServer, it is possible that we get a broken pipe before getting the + // BadAuthException, so we add some retries here, see HBASE-28417 + for (int i = 0; i < 10; i++) { + IOException ioe = doBadPreableHeaderCall(stub); + if (ioe instanceof BadAuthException) { + error = (BadAuthException) ioe; + break; + } + Thread.sleep(100); + } + assertNotNull("Can not get expected BadAuthException", error); + assertThat(error.getMessage(), containsString("authName=unknown")); } finally { rpcServer.stop(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java index e60cc879fd4f..24177f28c40c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java @@ -18,20 +18,20 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.channels.SocketChannel; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.codec.Codec; -import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +@RunWith(Parameterized.class) @Category({ RPCTests.class, MediumTests.class }) public class TestBlockingIPC extends AbstractTestIPC { @@ -39,11 +39,10 @@ public class TestBlockingIPC extends AbstractTestIPC { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBlockingIPC.class); - @Override - protected RpcServer createRpcServer(Server server, String name, - List services, InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { - return RpcServerFactory.createRpcServer(server, name, services, bindAddress, conf, scheduler); + 
@Parameters(name = "{index}: rpcServerImpl={0}") + public static List data() { + return Arrays.asList(new Object[] { SimpleRpcServer.class }, + new Object[] { NettyRpcServer.class }); } @Override @@ -73,41 +72,6 @@ protected boolean isTcpNoDelay() { }; } - private static class TestFailingRpcServer extends SimpleRpcServer { - - TestFailingRpcServer(Server server, String name, - List services, InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { - super(server, name, services, bindAddress, conf, scheduler, true); - } - - final class FailingConnection extends SimpleServerRpcConnection { - private FailingConnection(TestFailingRpcServer rpcServer, SocketChannel channel, - long lastContact) { - super(rpcServer, channel, lastContact); - } - - @Override - public void processRequest(ByteBuff buf) throws IOException, InterruptedException { - // this will throw exception after the connection header is read, and an RPC is sent - // from client - throw new DoNotRetryIOException("Failing for test"); - } - } - - @Override - protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) { - return new FailingConnection(this, channel, time); - } - } - - @Override - protected RpcServer createTestFailingRpcServer(String name, - List services, InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { - return new TestFailingRpcServer(null, name, services, bindAddress, conf, scheduler); - } - @Override protected AbstractRpcClient createBadAuthRpcClient(Configuration conf) { return new BlockingRpcClient(conf) { @@ -124,7 +88,6 @@ protected byte[] getConnectionHeaderPreamble() { } }; } - }; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java index a1b60e2cfa45..f2366a20fd2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java @@ -18,13 +18,10 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; @@ -51,18 +48,27 @@ public class TestNettyIPC extends AbstractTestIPC { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestNettyIPC.class); - @Parameters(name = "{index}: EventLoop={0}") - public static Collection parameters() { - List params = new ArrayList<>(); - params.add(new Object[] { "nio" }); - params.add(new Object[] { "perClientNio" }); + private static List getEventLoopTypes() { + List types = new ArrayList<>(); + types.add("nio"); + types.add("perClientNio"); if (JVM.isLinux() && JVM.isAmd64()) { - params.add(new Object[] { "epoll" }); + types.add("epoll"); + } + return types; + } + + @Parameters(name = "{index}: rpcServerImpl={0}, EventLoop={1}") + public static List parameters() { + List params = new ArrayList<>(); + for (String eventLoopType : getEventLoopTypes()) { + params.add(new Object[] { SimpleRpcServer.class, eventLoopType }); + params.add(new Object[] { NettyRpcServer.class, eventLoopType }); } return params; } - 
@Parameter + @Parameter(1) public String eventLoopType; private static NioEventLoopGroup NIO; @@ -103,13 +109,6 @@ private void setConf(Configuration conf) { } } - @Override - protected RpcServer createRpcServer(Server server, String name, - List services, InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { - return new NettyRpcServer(server, name, services, bindAddress, conf, scheduler, true); - } - @Override protected NettyRpcClient createRpcClientNoCodec(Configuration conf) { setConf(conf); @@ -141,13 +140,6 @@ protected boolean isTcpNoDelay() { }; } - @Override - protected RpcServer createTestFailingRpcServer(String name, - List services, InetSocketAddress bindAddress, - Configuration conf, RpcScheduler scheduler) throws IOException { - return new FailingNettyRpcServer(null, name, services, bindAddress, conf, scheduler); - } - @Override protected AbstractRpcClient createBadAuthRpcClient(Configuration conf) { return new NettyRpcClient(conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java index 1cbf6be26c65..00a6b23336ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyTlsIPC.java @@ -67,37 +67,41 @@ public class TestNettyTlsIPC extends AbstractTestIPC { private static NettyEventLoopGroupConfig EVENT_LOOP_GROUP_CONFIG; - @Parameterized.Parameter(0) + @Parameterized.Parameter(1) public X509KeyType caKeyType; - @Parameterized.Parameter(1) + @Parameterized.Parameter(2) public X509KeyType certKeyType; - @Parameterized.Parameter(2) + @Parameterized.Parameter(3) public char[] keyPassword; - @Parameterized.Parameter(3) + @Parameterized.Parameter(4) public boolean acceptPlainText; - @Parameterized.Parameter(4) + @Parameterized.Parameter(5) public boolean clientTlsEnabled; private X509TestContext x509TestContext; + // only netty rpc server supports TLS, so here we will only test NettyRpcServer @Parameterized.Parameters( - name = "{index}: caKeyType={0}, certKeyType={1}, keyPassword={2}, acceptPlainText={3}," - + " clientTlsEnabled={4}") + name = "{index}: rpcServerImpl={0}, caKeyType={1}, certKeyType={2}, keyPassword={3}," + + " acceptPlainText={4}, clientTlsEnabled={5}") public static List data() { List params = new ArrayList<>(); for (X509KeyType caKeyType : X509KeyType.values()) { for (X509KeyType certKeyType : X509KeyType.values()) { for (char[] keyPassword : new char[][] { "".toCharArray(), "pa$$w0rd".toCharArray() }) { // do not accept plain text - params.add(new Object[] { caKeyType, certKeyType, keyPassword, false, true }); + params.add(new Object[] { NettyRpcServer.class, caKeyType, certKeyType, keyPassword, + false, true }); // support plain text and client enables tls - params.add(new Object[] { caKeyType, certKeyType, keyPassword, true, true }); + params.add( + new Object[] { NettyRpcServer.class, caKeyType, certKeyType, keyPassword, true, true }); // support plain text and client disables tls - params.add(new Object[] { caKeyType, certKeyType, keyPassword, true, false }); + params.add(new Object[] { NettyRpcServer.class, caKeyType, certKeyType, keyPassword, true, + false }); } } } From e3825ca952ab0090d0297a804ae0f787a2971fba Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 6 Mar 2024 20:06:29 -0500 Subject: [PATCH 278/514] HBASE-28359 Improve quota RateLimiter synchronization (#5683) Signed-off-by: Bryan 
Beaudreault Signed-off-by: Duo Zhang --- .../hbase/quotas/DefaultOperationQuota.java | 5 ++ .../hbase/quotas/NoopOperationQuota.java | 5 ++ .../hadoop/hbase/quotas/OperationQuota.java | 11 +++ .../hadoop/hbase/quotas/RateLimiter.java | 31 ++++--- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 52 ++++++------ .../hbase/regionserver/RSRpcServices.java | 4 +- .../quotas/TestBlockBytesScannedQuota.java | 22 ++--- .../hadoop/hbase/quotas/TestRateLimiter.java | 83 +++++++++---------- 8 files changed, 124 insertions(+), 89 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 4b89e18a8021..a4ff8b2a859e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -134,6 +134,11 @@ public long getReadAvailable() { return readAvailable; } + @Override + public long getReadConsumed() { + return readConsumed; + } + @Override public void addGetResult(final Result result) { operationSize[OperationType.GET.ordinal()] += QuotaUtil.calculateResultSize(result); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java index 71fc169d671f..b64429d9adc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java @@ -68,4 +68,9 @@ public void addMutation(final Mutation mutation) { public long getReadAvailable() { return Long.MAX_VALUE; } + + @Override + public long getReadConsumed() { + return 0L; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java index ffc3cd50825c..bedad5e98673 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java @@ -80,4 +80,15 @@ public enum OperationType { /** Returns the number of bytes available to read to avoid exceeding the quota */ long getReadAvailable(); + + /** Returns the number of bytes consumed from the quota by the operation */ + long getReadConsumed(); + + /** + * Returns the maximum result size to be returned by the given operation. This is the greater of + * two numbers: the bytes available, or the bytes already consumed + */ + default long getMaxResultSize() { + return Math.max(getReadAvailable(), getReadConsumed()); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java index bda60ffa690a..5c69ad5d6cd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java @@ -23,12 +23,10 @@ /** * Simple rate limiter. 
Usage Example: // At this point you have a unlimited resource limiter - * RateLimiter limiter = new AverageIntervalRateLimiter(); or new FixedIntervalRateLimiter(); - * limiter.set(10, TimeUnit.SECONDS); // set 10 resources/sec while (true) { // call canExecute - * before performing resource consuming operation bool canExecute = limiter.canExecute(); // If - * there are no available resources, wait until one is available if (!canExecute) - * Thread.sleep(limiter.waitInterval()); // ...execute the work and consume the resource... - * limiter.consume(); } + * RateLimiter limiter = new AverageIntervalRateLimiter(); // or new FixedIntervalRateLimiter(); + * limiter.set(10, TimeUnit.SECONDS); // set 10 resources/sec while (limiter.getWaitIntervalMs > 0) + * { // wait until waitInterval == 0 Thread.sleep(limiter.getWaitIntervalMs()); } // ...execute the + * work and consume the resource... limiter.consume(); */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -135,10 +133,23 @@ protected synchronized long getTimeUnitInMillis() { /** * Is there at least one resource available to allow execution? - * @return true if there is at least one resource available, otherwise false + * @return the waitInterval to backoff, or 0 if execution is allowed */ - public boolean canExecute() { - return canExecute(1); + public long getWaitIntervalMs() { + return getWaitIntervalMs(1); + } + + /** + * Are there enough available resources to allow execution? + * @param amount the number of required resources, a non-negative number + * @return the waitInterval to backoff, or 0 if execution is allowed + */ + public synchronized long getWaitIntervalMs(final long amount) { + assert amount >= 0; + if (!isAvailable(amount)) { + return waitInterval(amount); + } + return 0; } /** @@ -146,7 +157,7 @@ public boolean canExecute() { * @param amount the number of required resources, a non-negative number * @return true if there are enough available resources, otherwise false */ - public synchronized boolean canExecute(final long amount) { + private boolean isAvailable(final long amount) { if (isBypass()) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index d7eb0e537a39..8ae2cae01881 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -141,43 +141,47 @@ private static void setFromTimedQuota(final RateLimiter limiter, final TimedQuot public void checkQuota(long writeReqs, long estimateWriteSize, long readReqs, long estimateReadSize, long estimateWriteCapacityUnit, long estimateReadCapacityUnit) throws RpcThrottlingException { - if (!reqsLimiter.canExecute(writeReqs + readReqs)) { - RpcThrottlingException.throwNumRequestsExceeded(reqsLimiter.waitInterval()); + long waitInterval = reqsLimiter.getWaitIntervalMs(writeReqs + readReqs); + if (waitInterval > 0) { + RpcThrottlingException.throwNumRequestsExceeded(waitInterval); } - if (!reqSizeLimiter.canExecute(estimateWriteSize + estimateReadSize)) { - RpcThrottlingException.throwRequestSizeExceeded( - reqSizeLimiter.waitInterval(estimateWriteSize + estimateReadSize)); + waitInterval = reqSizeLimiter.getWaitIntervalMs(estimateWriteSize + estimateReadSize); + if (waitInterval > 0) { + RpcThrottlingException.throwRequestSizeExceeded(waitInterval); } - if 
(!reqCapacityUnitLimiter.canExecute(estimateWriteCapacityUnit + estimateReadCapacityUnit)) { - RpcThrottlingException.throwRequestCapacityUnitExceeded( - reqCapacityUnitLimiter.waitInterval(estimateWriteCapacityUnit + estimateReadCapacityUnit)); + waitInterval = reqCapacityUnitLimiter + .getWaitIntervalMs(estimateWriteCapacityUnit + estimateReadCapacityUnit); + if (waitInterval > 0) { + RpcThrottlingException.throwRequestCapacityUnitExceeded(waitInterval); } if (estimateWriteSize > 0) { - if (!writeReqsLimiter.canExecute(writeReqs)) { - RpcThrottlingException.throwNumWriteRequestsExceeded(writeReqsLimiter.waitInterval()); + waitInterval = writeReqsLimiter.getWaitIntervalMs(writeReqs); + if (waitInterval > 0) { + RpcThrottlingException.throwNumWriteRequestsExceeded(waitInterval); } - if (!writeSizeLimiter.canExecute(estimateWriteSize)) { - RpcThrottlingException - .throwWriteSizeExceeded(writeSizeLimiter.waitInterval(estimateWriteSize)); + waitInterval = writeSizeLimiter.getWaitIntervalMs(estimateWriteSize); + if (waitInterval > 0) { + RpcThrottlingException.throwWriteSizeExceeded(waitInterval); } - if (!writeCapacityUnitLimiter.canExecute(estimateWriteCapacityUnit)) { - RpcThrottlingException.throwWriteCapacityUnitExceeded( - writeCapacityUnitLimiter.waitInterval(estimateWriteCapacityUnit)); + waitInterval = writeCapacityUnitLimiter.getWaitIntervalMs(estimateWriteCapacityUnit); + if (waitInterval > 0) { + RpcThrottlingException.throwWriteCapacityUnitExceeded(waitInterval); } } if (estimateReadSize > 0) { - if (!readReqsLimiter.canExecute(readReqs)) { - RpcThrottlingException.throwNumReadRequestsExceeded(readReqsLimiter.waitInterval()); + waitInterval = readReqsLimiter.getWaitIntervalMs(readReqs); + if (waitInterval > 0) { + RpcThrottlingException.throwNumReadRequestsExceeded(waitInterval); } - if (!readSizeLimiter.canExecute(estimateReadSize)) { - RpcThrottlingException - .throwReadSizeExceeded(readSizeLimiter.waitInterval(estimateReadSize)); + waitInterval = readSizeLimiter.getWaitIntervalMs(estimateReadSize); + if (waitInterval > 0) { + RpcThrottlingException.throwReadSizeExceeded(waitInterval); } - if (!readCapacityUnitLimiter.canExecute(estimateReadCapacityUnit)) { - RpcThrottlingException.throwReadCapacityUnitExceeded( - readCapacityUnitLimiter.waitInterval(estimateReadCapacityUnit)); + waitInterval = readCapacityUnitLimiter.getWaitIntervalMs(estimateReadCapacityUnit); + if (waitInterval > 0) { + RpcThrottlingException.throwReadCapacityUnitExceeded(waitInterval); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 4926aa30c8a4..25b229fd0c4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -716,7 +716,7 @@ private List doNonAtomicRegionMutation(final HRegion region, // doNonAtomicBatchOp call. 
We should be staying aligned though the Put and Delete are // deferred/batched List mutations = null; - long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); + long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getMaxResultSize()); IOException sizeIOE = null; ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder(); @@ -3611,7 +3611,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } RpcCall rpcCall = RpcServer.getCurrentCall().orElse(null); // now let's do the real scan. - long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); + long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getMaxResultSize()); RegionScanner scanner = rsh.s; // this is the limit of rows for this scan, if we the number of rows reach this value, we will // close the scanner. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java index e27ba123381c..5de9a2d1a900 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java @@ -103,9 +103,9 @@ public void testBBSGet() throws Exception { doPuts(10_000, FAMILY, QUALIFIER, table); TEST_UTIL.flush(TABLE_NAME); - // Add ~10 block/min limit + // Add ~10 block/sec limit admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.READ_SIZE, - Math.round(10.1 * blockSize), TimeUnit.MINUTES)); + Math.round(10.1 * blockSize), TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); // should execute at max 10 requests @@ -132,10 +132,10 @@ public void testBBSScan() throws Exception { doPuts(10_000, FAMILY, QUALIFIER, table); TEST_UTIL.flush(TABLE_NAME); - // Add 1 block/min limit. + // Add 1 block/sec limit. // This should only allow 1 scan per minute, because we estimate 1 block per scan admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, blockSize, - TimeUnit.MINUTES)); + TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); waitMinuteQuota(); @@ -148,9 +148,9 @@ public void testBBSScan() throws Exception { testTraffic(() -> doScans(100, table), 100, 0); testTraffic(() -> doScans(100, table), 100, 0); - // Add ~3 block/min limit. This should support >1 scans + // Add ~3 block/sec limit. This should support >1 scans admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, - Math.round(3.1 * blockSize), TimeUnit.MINUTES)); + Math.round(3.1 * blockSize), TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); // should execute some requests, but not all @@ -174,10 +174,10 @@ public void testBBSMultiGet() throws Exception { doPuts(rowCount, FAMILY, QUALIFIER, table); TEST_UTIL.flush(TABLE_NAME); - // Add 1 block/min limit. + // Add 1 block/sec limit. 
// This should only allow 1 multiget per minute, because we estimate 1 block per multiget admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, blockSize, - TimeUnit.MINUTES)); + TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); waitMinuteQuota(); @@ -190,9 +190,9 @@ public void testBBSMultiGet() throws Exception { testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); testTraffic(() -> doMultiGets(100, 10, rowCount, FAMILY, QUALIFIER, table), 100, 0); - // Add ~100 block/min limit + // Add ~100 block/sec limit admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, - Math.round(100.1 * blockSize), TimeUnit.MINUTES)); + Math.round(100.1 * blockSize), TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); // should execute approximately 10 batches of 10 requests @@ -211,7 +211,7 @@ public void testBBSMultiGet() throws Exception { private void testTraffic(Callable trafficCallable, long expectedSuccess, long marginOfError) throws Exception { - TEST_UTIL.waitFor(90_000, () -> { + TEST_UTIL.waitFor(5_000, () -> { long actualSuccess; try { actualSuccess = trafficCallable.call(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java index 49df937f7c5c..ae9b96d7a6c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java @@ -18,8 +18,7 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotEquals; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -71,7 +70,7 @@ private void testWaitInterval(final TimeUnit timeUnit, final long limit, // consume all the available resources, one request at the time. // the wait interval should be 0 for (int i = 0; i < (limit - 1); ++i) { - assertTrue(limiter.canExecute()); + assertEquals(0, limiter.getWaitIntervalMs()); limiter.consume(); long waitInterval = limiter.waitInterval(); assertEquals(0, waitInterval); @@ -81,7 +80,7 @@ private void testWaitInterval(final TimeUnit timeUnit, final long limit, // There is one resource available, so we should be able to // consume it without waiting. limiter.setNextRefillTime(limiter.getNextRefillTime() - nowTs); - assertTrue(limiter.canExecute()); + assertEquals(0, limiter.getWaitIntervalMs()); assertEquals(0, limiter.waitInterval()); limiter.consume(); // No more resources are available, we should wait for at least an interval. @@ -94,7 +93,7 @@ private void testWaitInterval(final TimeUnit timeUnit, final long limit, // artificially go into the past to prove that when too early we should fail. 
long temp = nowTs + 500; limiter.setNextRefillTime(limiter.getNextRefillTime() + temp); - assertFalse(limiter.canExecute()); + assertNotEquals(0, limiter.getWaitIntervalMs()); // Roll back the nextRefillTime set to continue further testing limiter.setNextRefillTime(limiter.getNextRefillTime() - temp); } @@ -107,7 +106,7 @@ public void testOverconsumptionAverageIntervalRefillStrategy() { // 10 resources are available, but we need to consume 20 resources // Verify that we have to wait at least 1.1sec to have 1 resource available - assertTrue(limiter.canExecute()); + assertEquals(0, limiter.getWaitIntervalMs()); limiter.consume(20); // We consumed twice the quota. Need to wait 1s to get back to 0, then another 100ms for the 1 assertEquals(1100, limiter.waitInterval(1)); @@ -116,10 +115,10 @@ public void testOverconsumptionAverageIntervalRefillStrategy() { // Verify that after 1sec we need to wait for another 0.1sec to get a resource available limiter.setNextRefillTime(limiter.getNextRefillTime() - 1000); - assertFalse(limiter.canExecute(1)); + assertNotEquals(0, limiter.getWaitIntervalMs(1)); limiter.setNextRefillTime(limiter.getNextRefillTime() - 100); // We've waited the full 1.1sec, should now have 1 available - assertTrue(limiter.canExecute(1)); + assertEquals(0, limiter.getWaitIntervalMs(1)); assertEquals(0, limiter.waitInterval()); } @@ -138,7 +137,7 @@ public long currentTime() { } }; EnvironmentEdgeManager.injectEdge(edge); - assertTrue(limiter.canExecute()); + assertEquals(0, limiter.getWaitIntervalMs()); // 10 resources are available, but we need to consume 20 resources limiter.consume(20); // We over-consumed by 10. Since this is a fixed interval refill, where @@ -149,10 +148,10 @@ public long currentTime() { // Verify that after 1sec also no resource should be available limiter.setNextRefillTime(limiter.getNextRefillTime() - 1000); - assertFalse(limiter.canExecute()); + assertNotEquals(0, limiter.getWaitIntervalMs()); // Verify that after total 2sec the 10 resource is available limiter.setNextRefillTime(limiter.getNextRefillTime() - 1000); - assertTrue(limiter.canExecute()); + assertEquals(0, limiter.getWaitIntervalMs()); assertEquals(0, limiter.waitInterval()); } @@ -161,12 +160,12 @@ public void testFixedIntervalResourceAvailability() throws Exception { RateLimiter limiter = new FixedIntervalRateLimiter(); limiter.set(10, TimeUnit.SECONDS); - assertTrue(limiter.canExecute(10)); + assertEquals(0, limiter.getWaitIntervalMs(10)); limiter.consume(3); assertEquals(7, limiter.getAvailable()); - assertFalse(limiter.canExecute(10)); + assertNotEquals(0, limiter.getWaitIntervalMs(10)); limiter.setNextRefillTime(limiter.getNextRefillTime() - 1000); - assertTrue(limiter.canExecute(10)); + assertEquals(0, limiter.getWaitIntervalMs(10)); assertEquals(10, limiter.getAvailable()); } @@ -182,7 +181,7 @@ public void testLimiterBySmallerRate() throws InterruptedException { limiter.setNextRefillTime(limiter.getNextRefillTime() - 500); for (int i = 0; i < 3; i++) { // 6 resources/sec < limit, so limiter.canExecute(nowTs, lastTs) should be true - assertEquals(true, limiter.canExecute()); + assertEquals(limiter.getWaitIntervalMs(), 0); limiter.consume(); } } @@ -237,7 +236,7 @@ public int testCanExecuteByRate(RateLimiter limiter, int rate) { int count = 0; while ((request++) < rate) { limiter.setNextRefillTime(limiter.getNextRefillTime() - limiter.getTimeUnitInMillis() / rate); - if (limiter.canExecute()) { + if (limiter.getWaitIntervalMs() == 0) { count++; limiter.consume(); } @@ -317,28 +316,28 @@ 
public void testUnconfiguredLimiters() throws InterruptedException { assertEquals(limit, avgLimiter.getAvailable()); assertEquals(limit, fixLimiter.getAvailable()); - assertTrue(avgLimiter.canExecute(limit)); + assertEquals(0, avgLimiter.getWaitIntervalMs(limit)); avgLimiter.consume(limit); - assertTrue(fixLimiter.canExecute(limit)); + assertEquals(0, fixLimiter.getWaitIntervalMs(limit)); fixLimiter.consume(limit); // Make sure that available is Long.MAX_VALUE - assertTrue(limit == avgLimiter.getAvailable()); - assertTrue(limit == fixLimiter.getAvailable()); + assertEquals(limit, avgLimiter.getAvailable()); + assertEquals(limit, fixLimiter.getAvailable()); // after 100 millseconds, it should be able to execute limit as well testEdge.incValue(100); - assertTrue(avgLimiter.canExecute(limit)); + assertEquals(0, avgLimiter.getWaitIntervalMs(limit)); avgLimiter.consume(limit); - assertTrue(fixLimiter.canExecute(limit)); + assertEquals(0, fixLimiter.getWaitIntervalMs(limit)); fixLimiter.consume(limit); // Make sure that available is Long.MAX_VALUE - assertTrue(limit == avgLimiter.getAvailable()); - assertTrue(limit == fixLimiter.getAvailable()); + assertEquals(limit, avgLimiter.getAvailable()); + assertEquals(limit, fixLimiter.getAvailable()); EnvironmentEdgeManager.reset(); } @@ -358,39 +357,39 @@ public void testExtremeLimiters() throws InterruptedException { assertEquals(limit, avgLimiter.getAvailable()); assertEquals(limit, fixLimiter.getAvailable()); - assertTrue(avgLimiter.canExecute(limit / 2)); + assertEquals(0, avgLimiter.getWaitIntervalMs(limit / 2)); avgLimiter.consume(limit / 2); - assertTrue(fixLimiter.canExecute(limit / 2)); + assertEquals(0, fixLimiter.getWaitIntervalMs(limit / 2)); fixLimiter.consume(limit / 2); // Make sure that available is whatever left - assertTrue((limit - (limit / 2)) == avgLimiter.getAvailable()); - assertTrue((limit - (limit / 2)) == fixLimiter.getAvailable()); + assertEquals((limit - (limit / 2)), avgLimiter.getAvailable()); + assertEquals((limit - (limit / 2)), fixLimiter.getAvailable()); // after 100 millseconds, both should not be able to execute the limit testEdge.incValue(100); - assertFalse(avgLimiter.canExecute(limit)); - assertFalse(fixLimiter.canExecute(limit)); + assertNotEquals(0, avgLimiter.getWaitIntervalMs(limit)); + assertNotEquals(0, fixLimiter.getWaitIntervalMs(limit)); // after 500 millseconds, average interval limiter should be able to execute the limit testEdge.incValue(500); - assertTrue(avgLimiter.canExecute(limit)); - assertFalse(fixLimiter.canExecute(limit)); + assertEquals(0, avgLimiter.getWaitIntervalMs(limit)); + assertNotEquals(0, fixLimiter.getWaitIntervalMs(limit)); // Make sure that available is correct - assertTrue(limit == avgLimiter.getAvailable()); - assertTrue((limit - (limit / 2)) == fixLimiter.getAvailable()); + assertEquals(limit, avgLimiter.getAvailable()); + assertEquals((limit - (limit / 2)), fixLimiter.getAvailable()); // after 500 millseconds, both should be able to execute testEdge.incValue(500); - assertTrue(avgLimiter.canExecute(limit)); - assertTrue(fixLimiter.canExecute(limit)); + assertEquals(0, avgLimiter.getWaitIntervalMs(limit)); + assertEquals(0, fixLimiter.getWaitIntervalMs(limit)); // Make sure that available is Long.MAX_VALUE - assertTrue(limit == avgLimiter.getAvailable()); - assertTrue(limit == fixLimiter.getAvailable()); + assertEquals(limit, avgLimiter.getAvailable()); + assertEquals(limit, fixLimiter.getAvailable()); EnvironmentEdgeManager.reset(); } @@ -413,19 +412,19 @@ public void 
testLimiterCompensationOverflow() throws InterruptedException { assertEquals(limit, avgLimiter.getAvailable()); // The initial guess is that 100 bytes. - assertTrue(avgLimiter.canExecute(guessNumber)); + assertEquals(0, avgLimiter.getWaitIntervalMs(guessNumber)); avgLimiter.consume(guessNumber); // Make sure that available is whatever left - assertTrue((limit - guessNumber) == avgLimiter.getAvailable()); + assertEquals((limit - guessNumber), avgLimiter.getAvailable()); // Manually set avil to simulate that another thread call canExecute(). // It is simulated by consume(). avgLimiter.consume(-80); - assertTrue((limit - guessNumber + 80) == avgLimiter.getAvailable()); + assertEquals((limit - guessNumber + 80), avgLimiter.getAvailable()); // Now thread1 compensates 80 avgLimiter.consume(-80); - assertTrue(limit == avgLimiter.getAvailable()); + assertEquals(limit, avgLimiter.getAvailable()); } } From 32292508b0a8e91ac76d32e29c93e4d1bf949236 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 7 Mar 2024 07:54:47 +0100 Subject: [PATCH 279/514] HBASE-28416 Remove hbase-examples from hbase-assembly (#5739) Signed-off-by: Andrew Purtell Signed-off-by: Nihal Jain Signed-off-by: Tak Lon (Stephen) Wu --- hbase-assembly/pom.xml | 4 ---- hbase-assembly/src/main/assembly/hadoop-three-compat.xml | 1 - 2 files changed, 5 deletions(-) diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index a6e88cdbf77e..44f8d0a198ff 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -160,10 +160,6 @@ org.apache.hbase hbase-http - - org.apache.hbase - hbase-examples - org.apache.hbase hbase-zookeeper diff --git a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml index 9db45b0d4752..27962b6e473c 100644 --- a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml +++ b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml @@ -40,7 +40,6 @@ org.apache.hbase:hbase-client org.apache.hbase:hbase-common org.apache.hbase:hbase-endpoint - org.apache.hbase:hbase-examples org.apache.hbase:hbase-external-blockcache org.apache.hbase:hbase-hadoop-compat org.apache.hbase:hbase-http From 643128f2529889b573df1cfaa511e7c050330487 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 7 Mar 2024 07:59:30 +0100 Subject: [PATCH 280/514] HBASE-28415 Remove Curator dependency from hbase-endpoint (#5738) Signed-off-by: Andrew Purtell Signed-off-by: Duo Zhang Signed-off-by: Tak Lon (Stephen) Wu --- hbase-endpoint/pom.xml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index e024fcae002e..ed17e8e48f1f 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -143,10 +143,6 @@ org.slf4j slf4j-api - - org.apache.curator - curator-client - junit junit From 91354a0003e438cca966272c3678d6a5fca03c57 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 7 Mar 2024 17:27:09 +0800 Subject: [PATCH 281/514] HBASE-26489 Update ref guide about the new changes for RpcConnectionRegistry (#5744) Signed-off-by: Nick Dimiduk --- src/main/asciidoc/_chapters/architecture.adoc | 84 +++++++++++++++++-- 1 file changed, 77 insertions(+), 7 deletions(-) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index 12bdc09ac764..3ff09fac63f2 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -261,8 +261,10 @@ For region name, we only accept `byte[]` as the parameter type and it may be a f Information on 
non-Java clients and custom protocols is covered in <> [[client.masterregistry]] - === Master Registry (new as of 2.3.0) +Starting from 2.5.0, MasterRegistry is deprecated. It's functionality is completely superseded by +the RpcConnectionRegistry. Please see <> for more details. + Client internally works with a _connection registry_ to fetch the metadata needed by connections. This connection registry implementation is responsible for fetching the following metadata. @@ -294,25 +296,29 @@ HMasters instead of ZooKeeper ensemble` To reduce hot-spotting on a single master, all the masters (active & stand-by) expose the needed service to fetch the connection metadata. This lets the client connect to any master (not just active). Both ZooKeeper-based and Master-based connection registry implementations are available in 2.3+. For -2.3 and earlier, the ZooKeeper-based implementation remains the default configuration. -The Master-based implementation becomes the default in 3.0.0. +2.x and earlier, the ZooKeeper-based implementation remains the default configuration. For 3.0.0, +RpcConnectionRegistry becomes the default configuration, as the alternate to MasterRegistry. Change the connection registry implementation by updating the value configured for `hbase.client.registry.impl`. To explicitly enable the ZooKeeper-based registry, use [source, xml] +---- hbase.client.registry.impl org.apache.hadoop.hbase.client.ZKConnectionRegistry - + +---- To explicitly enable the Master-based registry, use [source, xml] +---- hbase.client.registry.impl org.apache.hadoop.hbase.client.MasterRegistry - + +---- ==== MasterRegistry RPC hedging @@ -338,14 +344,78 @@ management. For more implementation details, please refer to the https://github.com/apache/hbase/tree/master/dev-support/design-docs[design doc] and https://issues.apache.org/jira/browse/HBASE-18095[HBASE-18095]. +[[client.rpcconnectionregistry]] +=== Rpc Connection Registry (new as of 2.5.0) +As said in the <> section, there are some disadvantages and limitations +for MasterRegistry, especially that it puts master in the critical path of read/write operations. +In order to address these problems, we introduced a more generic RpcConnectionRegistry. + +It is also rpc based, like MasterRegistry, with several differences + +. Region server also implements the necessary rpc service, so you can config any nodes in the cluster +as bootstrap nodes, not only masters +. Support refreshing bootstrap nodes, for spreading loads across the nodes in the cluster, and also +remove the dead nodes in bootstrap nodes. + +To explicitly enable the Master-based registry, use + +[source, xml] +---- + + hbase.client.registry.impl + org.apache.hadoop.hbase.client.RpcConnectionRegistry + +---- + +To configure the bootstrap nodes, use +[source, xml] +---- + + hbase.client.bootstrap.servers + server1:16020,server2:16020,server3:16020 + +---- + +If not configured, we will fallback to use master addresses as the bootstrap nodes. + +RpcConnectionRegistry is available in 2.5+, and becomes the default client registry implementation in 3.0.0. + +==== RpcConnectionRegistry RPC hedging +Hedged read is still supported, the configuration key is now _hbase.client.bootstrap.hedged.fanout_, and +its default value is still 2. + +==== RpcConnectionRegistry bootstrap nodes refreshing +There are basically two reasons for us to refresh the bootstrap nodes + +* Periodically. This is for spreading loads across the nodes in the cluster. There are two configurations + . 
_hbase.client.bootstrap.refresh_interval_secs_: the refresh interval in seconds, default 300. A value + less than or equal to zero means disable refreshing. + . _hbase.client.bootstrap.initial_refresh_delay_secs_: the initial refresh interval in seconds, the + default value is 1/10 of _hbase.client.bootstrap.refresh_interval_secs_. The reason why we want to + introduce a separated configuration for the delay for first refreshing is that, as end users could + configure any nodes in a cluster as the initial bootstrap nodes, it is possible that different end + users will configure the same machine which makes the machine over load. So we should have a shorter + delay for the initial refresh, to let users quickly switch to the bootstrap nodes we want them to + connect to. +* When there is a connection error while requesting the nodes, we will refresh immediately, to remove +the dead nodes. To avoid putting too much pressure to the cluster, there is a configuration +_hbase.client.bootstrap.min_secs_between_refreshes_, to control the minimum interval between two +refreshings. The default value is 60, but notice that, if you change +_hbase.client.bootstrap.refresh_interval_secs_ to a small value, you need to make sure to also change +_hbase.client.bootstrap.min_secs_between_refreshes_ to a value smaller than +_hbase.client.bootstrap.refresh_interval_secs_, otherwise an IllegalArgumentException will be thrown. + + ''' -NOTE: (Advanced) In case of any issues with the master based registry, use the following +NOTE: (Advanced) In case of any issues with the rpc/master based registry, use the following configuration to fallback to the ZooKeeper based connection registry implementation. [source, xml] +---- hbase.client.registry.impl org.apache.hadoop.hbase.client.ZKConnectionRegistry - + +---- [[client.filter]] == Client Request Filters From 936d267d1094e37222b9b836ab068689ccce3574 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 7 Mar 2024 18:33:25 +0100 Subject: [PATCH 282/514] HBASE-23324 Deprecate clients that connect to Zookeeper (#5745) Our objective is to remove ZooKeeper from our public interface, remaking it as an internal concern. Connecting to a cluster via ZooKeeper quorum will be considered deprecated starting in 2.6. Our default connection mechanism will switch to via RPC in 3.0 And finally we intend to remove the ZooKeeper connection mechanism from client-facing APIs in 4.0. Signed-off-by: Bryan Beaudreault --- .../hbase/client/ZKConnectionRegistry.java | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 0e13f0b83c91..a46f4d74e382 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -33,6 +33,7 @@ import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; @@ -51,20 +52,37 @@ /** * Zookeeper based registry implementation. + * @deprecated As of 2.6.0, replaced by {@link RpcConnectionRegistry}, which is the default + * connection mechanism as of 3.0.0. 
Expected to be removed in 4.0.0. + * @see HBASE-23324 and its parent + * ticket for details. */ -@InterfaceAudience.Private +@Deprecated +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) class ZKConnectionRegistry implements ConnectionRegistry { private static final Logger LOG = LoggerFactory.getLogger(ZKConnectionRegistry.class); + private static final Object WARN_LOCK = new Object(); + private static volatile boolean NEEDS_LOG_WARN = true; + private final ReadOnlyZKClient zk; private final ZNodePaths znodePaths; // User not used, but for rpc based registry we need it - ZKConnectionRegistry(Configuration conf, User user) { + ZKConnectionRegistry(Configuration conf, User ignored) { this.znodePaths = new ZNodePaths(conf); this.zk = new ReadOnlyZKClient(conf); + if (NEEDS_LOG_WARN) { + synchronized (WARN_LOCK) { + if (NEEDS_LOG_WARN) { + LOG.warn( + "ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry"); + NEEDS_LOG_WARN = false; + } + } + } } private interface Converter { From a5748a227ff700b5e4de8fc9b84d5bd2ecec9f7e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 10 Mar 2024 16:39:26 +0800 Subject: [PATCH 283/514] HBASE-28376 Column family ns does not exist in region during upgrade 3.0.0-beta-2 (#5697) Signed-off-by: Bryan Beaudreault --- .../server/master/MasterProcedure.proto | 9 ++ .../master/ClusterSchemaServiceImpl.java | 4 +- .../hbase/master/TableNamespaceManager.java | 135 +++++++++++----- .../MigrateNamespaceTableProcedure.java | 145 ++++++++++++++++++ .../hadoop/hbase/util/FSTableDescriptors.java | 35 +++-- .../master/TestMigrateNamespaceTable.java | 133 +++++++++++++++- 6 files changed, 402 insertions(+), 59 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index c562a4e5c2fe..c9c9c6357312 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -771,3 +771,12 @@ enum MigrateReplicationQueueFromZkToTableState { message MigrateReplicationQueueFromZkToTableStateData { repeated string disabled_peer_id = 1; } + +enum MigrateNamespaceTableProcedureState { + MIGRATE_NAMESPACE_TABLE_ADD_FAMILY = 1; + MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA = 2; + MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE = 3; +} + +message MigrateNamespaceTableProcedureStateData { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index 39d00d0908ef..27a21a8d1c5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -59,8 +59,8 @@ protected synchronized void doStart() { try { notifyStarted(); this.tableNamespaceManager.start(); - } catch (IOException ioe) { - notifyFailed(ioe); + } catch (IOException | InterruptedException e) { + notifyFailed(e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index a3b5afe98d51..4d18b2ad8f4e 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -19,29 +19,29 @@ import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure; +import org.apache.hadoop.hbase.master.procedure.MigrateNamespaceTableProcedure; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -66,61 +66,112 @@ public class TableNamespaceManager { private final MasterServices masterServices; + private volatile boolean migrationDone; + TableNamespaceManager(MasterServices masterServices) { this.masterServices = masterServices; } - private void migrateNamespaceTable() throws IOException { - try (Table nsTable = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); - ResultScanner scanner = nsTable.getScanner( - new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions()); - BufferedMutator mutator = - masterServices.getConnection().getBufferedMutator(TableName.META_TABLE_NAME)) { + private void tryMigrateNamespaceTable() throws IOException, InterruptedException { + Optional opt = masterServices.getProcedures().stream() + .filter(p -> p instanceof MigrateNamespaceTableProcedure) + .map(p -> (MigrateNamespaceTableProcedure) p).findAny(); + if (!opt.isPresent()) { + // the procedure is not present, check whether have the ns family in meta table + TableDescriptor metaTableDesc = + masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME); + if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { + // normal case, upgrading is done or the cluster is created with 3.x code + migrationDone = true; + } else { + // submit the migration procedure + MigrateNamespaceTableProcedure proc = new MigrateNamespaceTableProcedure(); + masterServices.getMasterProcedureExecutor().submitProcedure(proc); + } + } else { + if (opt.get().isFinished()) { + // the procedure is already done + migrationDone = true; + } + // we have already submitted the procedure, continue + } + } + + private void addToCache(Result result, byte[] family, byte[] qualifier) throws IOException { + Cell cell = result.getColumnLatestCell(family, qualifier); + NamespaceDescriptor ns = + 
ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream + .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))); + cache.put(ns.getName(), ns); + } + + private void loadFromMeta() throws IOException { + try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME); + ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) { for (Result result;;) { result = scanner.next(); if (result == null) { break; } - Put put = new Put(result.getRow()); - result - .getColumnCells(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, - TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES) - .forEach(c -> put.addColumn(HConstants.NAMESPACE_FAMILY, - HConstants.NAMESPACE_COL_DESC_QUALIFIER, c.getTimestamp(), CellUtil.cloneValue(c))); - mutator.mutate(put); + addToCache(result, HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER); } } - // schedule a disable procedure instead of block waiting here, as when disabling a table we will - // wait until master is initialized, but we are part of the initialization... - masterServices.getMasterProcedureExecutor().submitProcedure( - new DisableTableProcedure(masterServices.getMasterProcedureExecutor().getEnvironment(), - TableName.NAMESPACE_TABLE_NAME, false)); } - private void loadNamespaceIntoCache() throws IOException { - try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) { + private void loadFromNamespace() throws IOException { + try (Table table = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); + ResultScanner scanner = + table.getScanner(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES)) { for (Result result;;) { result = scanner.next(); if (result == null) { break; } - Cell cell = result.getColumnLatestCell(HConstants.NAMESPACE_FAMILY, - HConstants.NAMESPACE_COL_DESC_QUALIFIER); - NamespaceDescriptor ns = ProtobufUtil - .toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream - .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))); - cache.put(ns.getName(), ns); + addToCache(result, TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, + TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES); } } } - public void start() throws IOException { - TableState nsTableState = MetaTableAccessor.getTableState(masterServices.getConnection(), - TableName.NAMESPACE_TABLE_NAME); - if (nsTableState != null && nsTableState.isEnabled()) { - migrateNamespaceTable(); + private boolean shouldLoadFromMeta() throws IOException { + if (migrationDone) { + return true; } + // the implementation is bit tricky + // if there is already a disable namespace table procedure or the namespace table is already + // disabled, we are safe to read from meta table as the migration is already done. If not, since + // we are part of the master initialization work, so we can make sure that when reaching here, + // the master has not been marked as initialize yet. And DisableTableProcedure can only be + // executed after master is initialized, so here we are safe to read from namespace table, + // without worrying about that the namespace table is disabled while we are reading and crash + // the master startup. 
+ if ( + masterServices.getTableStateManager().isTableState(TableName.NAMESPACE_TABLE_NAME, + TableState.State.DISABLED) + ) { + return true; + } + if ( + masterServices.getProcedures().stream().filter(p -> p instanceof DisableTableProcedure) + .anyMatch( + p -> ((DisableTableProcedure) p).getTableName().equals(TableName.NAMESPACE_TABLE_NAME)) + ) { + return true; + } + return false; + } + + private void loadNamespaceIntoCache() throws IOException { + if (shouldLoadFromMeta()) { + loadFromMeta(); + } else { + loadFromNamespace(); + } + + } + + public void start() throws IOException, InterruptedException { + tryMigrateNamespaceTable(); loadNamespaceIntoCache(); } @@ -135,7 +186,14 @@ public NamespaceDescriptor get(String name) throws IOException { return cache.get(name); } + private void checkMigrationDone() throws IOException { + if (!migrationDone) { + throw new HBaseIOException("namespace migration is ongoing, modification is disallowed"); + } + } + public void addOrUpdateNamespace(NamespaceDescriptor ns) throws IOException { + checkMigrationDone(); insertNamespaceToMeta(masterServices.getConnection(), ns); cache.put(ns.getName(), ns); } @@ -152,6 +210,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns } public void deleteNamespace(String namespaceName) throws IOException { + checkMigrationDone(); Delete d = new Delete(Bytes.toBytes(namespaceName)); try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) { table.delete(d); @@ -174,6 +233,10 @@ public void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOExcep } } + public void setMigrationDone() { + migrationDone = true; + } + public static long getMaxTables(NamespaceDescriptor ns) throws IOException { String value = ns.getConfigurationValue(KEY_MAX_TABLES); long maxTables = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java new file mode 100644 index 000000000000..dc9eac4c879d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MigrateNamespaceTableProcedureState; + +/** + * Migrate the namespace data to meta table's namespace family while upgrading + */ +@InterfaceAudience.Private +public class MigrateNamespaceTableProcedure + extends StateMachineProcedure + implements GlobalProcedureInterface { + + private static final Logger LOG = LoggerFactory.getLogger(MigrateNamespaceTableProcedure.class); + + private RetryCounter retryCounter; + + @Override + public String getGlobalId() { + return getClass().getSimpleName(); + } + + private void migrate(MasterProcedureEnv env) throws IOException { + Connection conn = env.getMasterServices().getConnection(); + try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME); + ResultScanner scanner = nsTable.getScanner( + new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions()); + BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) { + for (Result result;;) { + result = scanner.next(); + if (result == null) { + break; + } + Put put = new Put(result.getRow()); + result + .getColumnCells(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, + TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES) + .forEach(c -> put.addColumn(HConstants.NAMESPACE_FAMILY, + HConstants.NAMESPACE_COL_DESC_QUALIFIER, c.getTimestamp(), CellUtil.cloneValue(c))); + mutator.mutate(put); + } + } + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTableProcedureState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + try { + switch (state) { + case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY: + TableDescriptor metaTableDesc = + env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME); + if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { + TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) + .setColumnFamily( + FSTableDescriptors.getNamespaceFamilyDescForMeta(env.getMasterConfiguration())) + .build(); + addChildProcedure(new ModifyTableProcedure(env, newMetaTableDesc)); + } + setNextState(MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA); + return Flow.HAS_MORE_STATE; + case MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA: + migrate(env); + 
setNextState(MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE); + return Flow.HAS_MORE_STATE; + case MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE: + addChildProcedure(new DisableTableProcedure(env, TableName.NAMESPACE_TABLE_NAME, false)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("Unhandled state=" + state); + } + } catch (IOException e) { + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("Failed migrating namespace data, suspend {}secs {}", backoff / 1000, this, e); + throw suspend(Math.toIntExact(backoff), true); + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, MigrateNamespaceTableProcedureState state) + throws IOException, InterruptedException { + } + + @Override + protected MigrateNamespaceTableProcedureState getState(int stateId) { + return MigrateNamespaceTableProcedureState.forNumber(stateId); + } + + @Override + protected int getStateId(MigrateNamespaceTableProcedureState state) { + return state.getNumber(); + } + + @Override + protected MigrateNamespaceTableProcedureState getInitialState() { + return MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_ADD_FAMILY; + } + + @Override + protected void completionCleanup(MasterProcedureEnv env) { + env.getMasterServices().getClusterSchema().getTableNamespaceManager().setMigrationDone(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 27d66d14ca7c..75bf721ef41e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.yetus.audience.InterfaceAudience; @@ -168,16 +169,28 @@ public static ColumnFamilyDescriptor getTableFamilyDescForMeta(final Configurati .setMaxVersions( conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true).setBlocksize(8 * 1024).setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL).build(); + .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL) + .build(); } public static ColumnFamilyDescriptor getReplBarrierFamilyDescForMeta() { return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) .setMaxVersions(HConstants.ALL_VERSIONS).setInMemory(true) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL).build(); + .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL) + .build(); + } + + public static ColumnFamilyDescriptor getNamespaceFamilyDescForMeta(Configuration conf) { + return 
ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true) + .setBlocksize( + conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL) + .build(); } private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) @@ -193,20 +206,10 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con .setBlocksize( conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROWCOL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .build()) + .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build()) .setColumnFamily(getTableFamilyDescForMeta(conf)) .setColumnFamily(getReplBarrierFamilyDescForMeta()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY) - .setMaxVersions( - conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize( - conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL).build()) - .setCoprocessor( + .setColumnFamily(getNamespaceFamilyDescForMeta(conf)).setCoprocessor( CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java index b11303636753..30dd308c28f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java @@ -19,20 +19,34 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import java.io.IOException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import 
org.apache.hadoop.hbase.procedure2.ProcedureYieldException; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -41,6 +55,7 @@ import org.junit.experimental.categories.Category; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** * Testcase for HBASE-21154. @@ -54,6 +69,75 @@ public class TestMigrateNamespaceTable { private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static volatile boolean CONTINUE = false; + + // used to halt the migration procedure + public static final class SuspendProcedure extends Procedure + implements TableProcedureInterface { + + @Override + public TableName getTableName() { + return TableName.META_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.CREATE; + } + + @Override + protected LockState acquireLock(final MasterProcedureEnv env) { + if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) { + return LockState.LOCK_EVENT_WAIT; + } + return LockState.LOCK_ACQUIRED; + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName()); + } + + @Override + protected boolean holdLock(MasterProcedureEnv env) { + return true; + } + + @Override + protected Procedure[] execute(MasterProcedureEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + if (CONTINUE) { + return null; + } + throw suspend(1000, true); + } + + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + + @Override + protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return true; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + } + } + @BeforeClass public static void setUp() throws Exception { StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1) @@ -66,6 +150,32 @@ public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } + // simulate upgrading scenario, where we do not have the ns family + private void removeNamespaceFamily() throws IOException { + FileSystem fs = UTIL.getTestFileSystem(); + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); + TableDescriptor metaTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + TableDescriptor noNsMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) + .removeColumnFamily(HConstants.NAMESPACE_FAMILY).build(); + FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, noNsMetaTableDesc, + true); + for (FileStatus status : 
fs.listStatus(tableDir)) { + if (!status.isDirectory()) { + continue; + } + Path familyPath = new Path(status.getPath(), HConstants.NAMESPACE_FAMILY_STR); + fs.delete(familyPath, true); + } + } + + private void addNamespace(Table table, NamespaceDescriptor nd) throws IOException { + table.put(new Put(Bytes.toBytes(nd.getName())).addColumn( + TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, + TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES, + ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray())); + } + @Test public void testMigrate() throws IOException, InterruptedException { UTIL.getAdmin().createTable(TableDescriptorBuilder.NAMESPACE_TABLEDESC); @@ -73,17 +183,21 @@ public void testMigrate() throws IOException, InterruptedException { for (int i = 0; i < 5; i++) { NamespaceDescriptor nd = NamespaceDescriptor.create("Test-NS-" + i) .addConfiguration("key-" + i, "value-" + i).build(); - table.put(new Put(Bytes.toBytes(nd.getName())).addColumn( - TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, - TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES, - ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray())); + addNamespace(table, nd); AbstractStateMachineNamespaceProcedure .createDirectory(UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), nd); } + // add default and system + addNamespace(table, NamespaceDescriptor.DEFAULT_NAMESPACE); + addNamespace(table, NamespaceDescriptor.SYSTEM_NAMESPACE); } MasterThread masterThread = UTIL.getMiniHBaseCluster().getMasterThread(); + masterThread.getMaster().getMasterProcedureExecutor().submitProcedure(new SuspendProcedure()); masterThread.getMaster().stop("For testing"); masterThread.join(); + + removeNamespaceFamily(); + UTIL.getMiniHBaseCluster().startMaster(); // 5 + default and system('hbase') @@ -94,7 +208,16 @@ public void testMigrate() throws IOException, InterruptedException { assertEquals(1, nd.getConfiguration().size()); assertEquals("value-" + i, nd.getConfigurationValue("key-" + i)); } + // before migration done, modification on the namespace is not supported + TableNamespaceManager tableNsMgr = + UTIL.getMiniHBaseCluster().getMaster().getClusterSchema().getTableNamespaceManager(); + assertThrows(IOException.class, () -> tableNsMgr.deleteNamespace("Test-NS-0")); + assertThrows(IOException.class, + () -> tableNsMgr.addOrUpdateNamespace(NamespaceDescriptor.create("NNN").build())); + CONTINUE = true; UTIL.waitFor(30000, () -> UTIL.getAdmin().isTableDisabled(TableName.NAMESPACE_TABLE_NAME)); + // this time it is allowed to change the namespace + UTIL.getAdmin().createNamespace(NamespaceDescriptor.create("NNN").build()); masterThread = UTIL.getMiniHBaseCluster().getMasterThread(); masterThread.getMaster().stop("For testing"); @@ -102,7 +225,7 @@ public void testMigrate() throws IOException, InterruptedException { UTIL.getMiniHBaseCluster().startMaster(); // make sure that we could still restart the cluster after disabling the namespace table. 
- assertEquals(7, UTIL.getAdmin().listNamespaceDescriptors().length); + assertEquals(8, UTIL.getAdmin().listNamespaceDescriptors().length); // let's delete the namespace table UTIL.getAdmin().deleteTable(TableName.NAMESPACE_TABLE_NAME); From 7641b5423d21d04b2e73f9cf82cc1b97d3382966 Mon Sep 17 00:00:00 2001 From: Jing Yu Date: Sun, 10 Mar 2024 00:43:46 -0800 Subject: [PATCH 284/514] HBASE-28424 Set correct Result to RegionActionResult for successful Put/Delete mutations (#5760) Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/regionserver/RSRpcServices.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 25b229fd0c4e..7043b78c0485 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1046,8 +1046,10 @@ private void doBatchOp(final RegionActionResult.Builder builder, final HRegion r break; case SUCCESS: - builder.addResultOrException( - getResultOrException(ClientProtos.Result.getDefaultInstance(), index)); + ClientProtos.Result result = codes[i].getResult() == null + ? ClientProtos.Result.getDefaultInstance() + : ProtobufUtil.toResult(codes[i].getResult()); + builder.addResultOrException(getResultOrException(result, index)); break; case STORE_TOO_BUSY: From 3b00db0e2c026e9bfaa42f652fe5d78364710910 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 10 Mar 2024 16:52:59 +0800 Subject: [PATCH 285/514] HBASE-28401 Introduce a close method for memstore for release active segment (#5705) Signed-off-by: Bryan Beaudreault --- .../hbase/regionserver/AbstractMemStore.java | 9 ++ .../hadoop/hbase/regionserver/HStore.java | 1 + .../hadoop/hbase/regionserver/MemStore.java | 14 ++- .../apache/hadoop/hbase/HBaseTestingUtil.java | 2 + .../TestCoreRegionCoprocessor.java | 4 + .../TestCacheOnWriteInSchema.java | 17 +-- .../hbase/regionserver/TestHRegion.java | 102 +++++++++--------- .../TestHRegionWithInMemoryFlush.java | 49 ++++----- .../TestMemStoreSegmentsIterator.java | 42 +++----- .../hbase/wal/WALPerformanceEvaluation.java | 4 + 10 files changed, 118 insertions(+), 126 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 62ff6f9a92fd..9a88cab450af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -376,6 +376,15 @@ ImmutableSegment getSnapshot() { return snapshot; } + @Override + public void close() { + // active should never be null + active.close(); + // for snapshot, either it is empty, where we do not reference any real segment which contains a + // memstore lab, or it is during snapshot, where we will clear it when calling clearSnapshot, so + // we do not need to close it here + } + /** Returns an ordered list of segments from most recent to oldest in memstore */ protected abstract List getSegments() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index dccfd0c0af7b..43a63359961e 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -726,6 +726,7 @@ private void bulkLoadHFile(HStoreFile sf) throws IOException { } private ImmutableCollection closeWithoutLock() throws IOException { + memstore.close(); // Clear so metrics doesn't find them. ImmutableCollection result = storeEngine.getStoreFileManager().clearFiles(); Collection compactedfiles = storeEngine.getStoreFileManager().clearCompactedFiles(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 947944baf914..cd8eecd54301 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver; +import java.io.Closeable; import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; @@ -31,7 +32,7 @@ *

*/ @InterfaceAudience.Private -public interface MemStore { +public interface MemStore extends Closeable { /** * Creates a snapshot of the current memstore. Snapshot must be cleared by call to @@ -131,4 +132,15 @@ default void startReplayingFromWAL() { default void stopReplayingFromWAL() { return; } + + /** + * Close the memstore. + *

+ * Usually this should only be called when there is nothing in the memstore, unless we are going + * to abort ourselves. + *

+ * For normal cases, this method is only used to fix the reference counting, see HBASE-27941. + */ + @Override + void close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 8f7816106126..79b041f3421a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -1699,6 +1699,8 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws */ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc, WAL wal) throws IOException { + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java index 6b52ce497a2d..e569457c7479 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java @@ -32,7 +32,9 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; @@ -72,6 +74,8 @@ public void before() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfd).build(); RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build(); this.rss = new MockRegionServerServices(HTU.getConfiguration()); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 037952035fdf..85cee077dcb6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -134,7 +133,6 @@ public ColumnFamilyDescriptorBuilder modifyFamilySchema(ColumnFamilyDescriptorBu private final String testDescription; private HRegion region; private HStore store; - private WALFactory walFactory; private FileSystem fs; public TestCacheOnWriteInSchema(CacheOnWriteType cowType) { @@ -179,24 +177,17 @@ public void setUp() throws IOException { fs.delete(logdir, true); 
RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - walFactory = new WALFactory(conf, id); - region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info)); - region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); - store = new HStore(region, hcd, conf, false); + region = HBaseTestingUtil.createRegionAndWAL(info, logdir, conf, htd, + BlockCacheFactory.createBlockCache(conf)); + store = region.getStore(hcd.getName()); } @After public void tearDown() throws IOException { IOException ex = null; try { - region.close(); - } catch (IOException e) { - LOG.warn("Caught Exception", e); - ex = e; - } - try { - walFactory.close(); + HBaseTestingUtil.closeRegionAndWAL(region); } catch (IOException e) { LOG.warn("Caught Exception", e); ex = e; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index d244ca767be2..abeec8a095ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -447,32 +447,24 @@ public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOExcep /** * A test case of HBASE-21041 - * @throws Exception Exception */ @Test public void testFlushAndMemstoreSizeCounting() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); - try { - for (byte[] row : HBaseTestingUtil.ROWS) { - Put put = new Put(row); - put.addColumn(family, family, row); - region.put(put); - } - region.flush(true); - // After flush, data size should be zero - assertEquals(0, region.getMemStoreDataSize()); - // After flush, a new active mutable segment is created, so the heap size - // should equal to MutableSegment.DEEP_OVERHEAD - assertEquals(MutableSegment.DEEP_OVERHEAD, region.getMemStoreHeapSize()); - // After flush, offheap should be zero - assertEquals(0, region.getMemStoreOffHeapSize()); - } finally { - HBaseTestingUtil.closeRegionAndWAL(this.region); - this.region = null; - wals.close(); + for (byte[] row : HBaseTestingUtil.ROWS) { + Put put = new Put(row); + put.addColumn(family, family, row); + region.put(put); } + region.flush(true); + // After flush, data size should be zero + assertEquals(0, region.getMemStoreDataSize()); + // After flush, a new active mutable segment is created, so the heap size + // should equal to MutableSegment.DEEP_OVERHEAD + assertEquals(MutableSegment.DEEP_OVERHEAD, region.getMemStoreHeapSize()); + // After flush, offheap should be zero + assertEquals(0, region.getMemStoreOffHeapSize()); } /** @@ -1283,6 +1275,12 @@ public long getSyncedLength() { // throwing a DroppedSnapshotException to force an abort. Just clean up the mess. region.close(true); wal.close(); + // release the snapshot and active segment, so netty will not report memory leak + for (HStore store : region.getStores()) { + AbstractMemStore memstore = (AbstractMemStore) store.memstore; + memstore.doClearSnapShot(); + memstore.close(); + } // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH }; @@ -1297,15 +1295,18 @@ public long getSyncedLength() { // DroppedSnapshotException. 
Below COMMIT_FLUSH will cause flush to abort wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH }; - try { - region.flush(true); - fail("This should have thrown exception"); - } catch (DroppedSnapshotException expected) { - // we expect this exception, since we were able to write the snapshot, but failed to - // write the flush marker to WAL - } catch (IOException unexpected) { - throw unexpected; + // we expect this exception, since we were able to write the snapshot, but failed to + // write the flush marker to WAL + assertThrows(DroppedSnapshotException.class, () -> region.flush(true)); + + region.close(true); + // release the snapshot and active segment, so netty will not report memory leak + for (HStore store : region.getStores()) { + AbstractMemStore memstore = (AbstractMemStore) store.memstore; + memstore.doClearSnapShot(); + memstore.close(); } + region = null; } @Test @@ -3735,14 +3736,14 @@ public void testGetScanner_WithRegionClosed() throws IOException { byte[][] families = { fam1, fam2 }; // Setting up region + region = initHRegion(tableName, method, CONF, families); + region.closed.set(true); try { - this.region = initHRegion(tableName, method, CONF, families); - } catch (IOException e) { - e.printStackTrace(); - fail("Got IOException during initHRegion, " + e.getMessage()); + assertThrows(NotServingRegionException.class, () -> region.getScanner(null)); + } finally { + // so we can close the region in tearDown + region.closed.set(false); } - region.closed.set(true); - assertThrows(NotServingRegionException.class, () -> region.getScanner(null)); } @Test @@ -4543,14 +4544,14 @@ public void flush() { /** * So can be overridden in subclasses. */ - int getNumQualifiersForTestWritesWhileScanning() { + protected int getNumQualifiersForTestWritesWhileScanning() { return 100; } /** * So can be overridden in subclasses. */ - int getTestCountForTestWritesWhileScanning() { + protected int getTestCountForTestWritesWhileScanning() { return 100; } @@ -5829,12 +5830,12 @@ protected HRegion initHRegion(TableName tableName, String callingMethod, Configu * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} * when done. */ - protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, + private HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { return initHRegion(tableName, null, null, callingMethod, conf, isReadOnly, families); } - protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + private HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); @@ -5849,7 +5850,7 @@ protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopK * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} * when done. */ - public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, @@ -6708,14 +6709,12 @@ public void testCloseRegionWrittenToWAL() throws Exception { WAL wal = mockWAL(); when(rss.getWAL(any(RegionInfo.class))).thenReturn(wal); - // create and then open a region first so that it can be closed later - region = - HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri)); - region = - HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); - - // close the region - region.close(false); + // create the region + region = HBaseTestingUtil.createRegionAndWAL(hri, rootDir, CONF, htd); + HBaseTestingUtil.closeRegionAndWAL(region); + region = null; + // open the region first and then close it + HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null).close(); // 2 times, one for region open, the other close region verify(wal, times(2)).appendMarker(any(RegionInfo.class), (WALKeyImpl) any(WALKeyImpl.class), @@ -7249,7 +7248,7 @@ public void testCheckAndRowMutateTimestampsAreMonotonic() throws IOException { qual2.length)); } - HRegion initHRegion(TableName tableName, String callingMethod, byte[]... families) + private HRegion initHRegion(TableName tableName, String callingMethod, byte[]... families) throws IOException { return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families); } @@ -7727,12 +7726,7 @@ public void run() { holder.start(); latch.await(); - try { - region.close(); - } catch (IOException e) { - LOG.info("Caught expected exception", e); - } - region = null; + assertThrows(IOException.class, () -> region.close()); holder.join(); // Verify the region tried to abort the server diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index f43d265d5714..27bdae43857f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -28,8 +30,6 @@ import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -50,7 +50,7 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion { * when done. */ @Override - public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { boolean[] inMemory = new boolean[families.length]; @@ -64,7 +64,7 @@ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, } @Override - int getTestCountForTestWritesWhileScanning() { + protected int getTestCountForTestWritesWhileScanning() { return 10; } @@ -73,44 +73,35 @@ int getTestCountForTestWritesWhileScanning() { * easy on it. See if that helps. */ @Override - int getNumQualifiersForTestWritesWhileScanning() { + protected int getNumQualifiersForTestWritesWhileScanning() { return 10; } /** * A test case of HBASE-21041 - * @throws Exception Exception */ @Override @Test public void testFlushAndMemstoreSizeCounting() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); int count = 0; - try { - for (byte[] row : HBaseTestingUtil.ROWS) { - Put put = new Put(row); - put.addColumn(family, family, row); - region.put(put); - // In memory flush every 1000 puts - if (count++ % 1000 == 0) { - ((CompactingMemStore) (region.getStore(family).memstore)).flushInMemory(); - } + for (byte[] row : HBaseTestingUtil.ROWS) { + Put put = new Put(row); + put.addColumn(family, family, row); + region.put(put); + // In memory flush every 1000 puts + if (count++ % 1000 == 0) { + ((CompactingMemStore) (region.getStore(family).memstore)).flushInMemory(); } - region.flush(true); - // After flush, data size should be zero - Assert.assertEquals(0, region.getMemStoreDataSize()); - // After flush, a new active mutable segment is created, so the heap size - // should equal to MutableSegment.DEEP_OVERHEAD - Assert.assertEquals(MutableSegment.DEEP_OVERHEAD, region.getMemStoreHeapSize()); - // After flush, offheap size should be zero - Assert.assertEquals(0, region.getMemStoreOffHeapSize()); - - } finally { - HBaseTestingUtil.closeRegionAndWAL(this.region); - this.region = null; - wals.close(); } + region.flush(true); + // After flush, data size should be zero + assertEquals(0, region.getMemStoreDataSize()); + // After flush, a new active mutable segment is created, so the heap size + // should equal to MutableSegment.DEEP_OVERHEAD + assertEquals(MutableSegment.DEEP_OVERHEAD, region.getMemStoreHeapSize()); + // After flush, offheap size should be zero + assertEquals(0, region.getMemStoreOffHeapSize()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java index e75dc5e0e50e..6f5ef2c10257 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -60,18 +59,17 @@ public class TestMemStoreSegmentsIterator { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMemStoreSegmentsIterator.class); - protected static String TABLE = "test_mscsi"; - protected static String FAMILY = "f"; - protected static String COLUMN = "c"; - protected static String ROOT_SUB_PATH = "testMemStoreSegmentsIterator"; - 
protected static long LESS_THAN_INTEGER_MAX_VALUE_SEQ_ID = Long.valueOf(Integer.MAX_VALUE) - 1; - protected static long GREATER_THAN_INTEGER_MAX_VALUE_SEQ_ID = Long.valueOf(Integer.MAX_VALUE) + 1; + private static String TABLE = "test_mscsi"; + private static String FAMILY = "f"; + private static String COLUMN = "c"; + private static String ROOT_SUB_PATH = "testMemStoreSegmentsIterator"; + private static long LESS_THAN_INTEGER_MAX_VALUE_SEQ_ID = Long.valueOf(Integer.MAX_VALUE) - 1; + private static long GREATER_THAN_INTEGER_MAX_VALUE_SEQ_ID = Long.valueOf(Integer.MAX_VALUE) + 1; - protected CellComparator comparator; - protected int compactionKVMax; - protected WAL wal; - protected HRegion region; - protected HStore store; + private CellComparator comparator; + private int compactionKVMax; + private HRegion region; + private HStore store; @Before public void setup() throws IOException { @@ -85,10 +83,9 @@ public void setup() throws IOException { RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE)).build(); Path rootPath = hbaseUtility.getDataTestDir(ROOT_SUB_PATH); - this.wal = HBaseTestingUtil.createWal(conf, rootPath, info); this.region = - HRegion.createHRegion(info, rootPath, conf, tableDescriptorBuilder.build(), this.wal, true); - this.store = new HStore(this.region, columnFamilyDescriptor, conf, false); + HBaseTestingUtil.createRegionAndWAL(info, rootPath, conf, tableDescriptorBuilder.build()); + this.store = region.getStore(columnFamilyDescriptor.getName()); this.comparator = CellComparator.getInstance(); this.compactionKVMax = HConstants.COMPACTION_KV_MAX_DEFAULT; } @@ -150,21 +147,8 @@ protected void verifyNext(MemStoreSegmentsIterator iterator) { @After public void tearDown() throws Exception { EnvironmentEdgeManagerTestHelper.reset(); - if (store != null) { - try { - store.close(); - } catch (IOException e) { - } - store = null; - } if (region != null) { - region.close(); - region = null; - } - - if (wal != null) { - wal.close(); - wal = null; + HBaseTestingUtil.closeRegionAndWAL(region); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index fcbbb3cb6996..c462e2e5c621 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.LogRoller; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -266,6 +267,9 @@ public int run(String[] args) throws Exception { // Internal config. goes off number of threads; if more threads than handlers, stuff breaks. // In regionserver, number of handlers == number of threads. 
getConf().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, numThreads); + // We do not need memstore here, so disable memstore lab, otherwise we need to initialize + // ChunkCreator + getConf().setBoolean(MemStoreLAB.USEMSLAB_KEY, false); if (rootRegionDir == null) { TEST_UTIL = new HBaseTestingUtil(getConf()); From 76c632c6d1c505b670da375681eb605437d233ad Mon Sep 17 00:00:00 2001 From: guluo Date: Sun, 10 Mar 2024 21:27:44 +0800 Subject: [PATCH 286/514] HBASE-28395 TableNotFoundException when executing 'hbase hbck' (#5706) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/util/HBaseFsck.java | 5 ++ .../hbase/util/hbck/ReplicationChecker.java | 4 ++ ...HBaseFsckWithoutTableHbaseReplication.java | 70 +++++++++++++++++++ 3 files changed, 79 insertions(+) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckWithoutTableHbaseReplication.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 31020cf4bce5..0d24ef783762 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -2572,6 +2572,11 @@ private synchronized HbckRegionInfo getOrCreateInfo(String name) { private void checkAndFixReplication() throws ReplicationException, IOException { ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors); + + if (!checker.checkHasDataInQueues()) { + return; + } + checker.checkUnDeletedQueues(); if (checker.hasUnDeletedQueues() && this.fixReplication) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index 497304a31113..f92631eb7924 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -130,4 +130,8 @@ public void fixUnDeletedQueues() throws ReplicationException { queueStorage.removePeerFromHFileRefs(peerId); } } + + public boolean checkHasDataInQueues() throws ReplicationException { + return queueStorage.hasData(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckWithoutTableHbaseReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckWithoutTableHbaseReplication.java new file mode 100644 index 000000000000..279962c934fd --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckWithoutTableHbaseReplication.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +@Category({ MiscTests.class, MediumTests.class }) +public class TestHBaseFsckWithoutTableHbaseReplication { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestHBaseFsckWithoutTableHbaseReplication.class); + + @ClassRule + public static final TestName name = new TestName(); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static final TableName tableName = + TableName.valueOf("replication_" + name.getMethodName()); + + @Before + public void setUp() throws Exception { + UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false); + UTIL.getConfiguration().set(ReplicationStorageFactory.REPLICATION_QUEUE_TABLE_NAME, + tableName.getNameAsString()); + UTIL.startMiniCluster(1); + } + + @After + public void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void test() throws Exception { + assertFalse(UTIL.getAdmin().tableExists(tableName)); + HBaseFsck hBaseFsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true); + assertEquals(0, hBaseFsck.getRetCode()); + } +} From 34b738d2ac04f7a9acead98b90ee1e2f220afff5 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Mon, 11 Mar 2024 09:11:01 -0400 Subject: [PATCH 287/514] HBASE-28260: Add NO_WRITE_LOCAL flag to WAL file creation (#5733) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang Signed-off-by: Wei-Chiu Chuang --- .../hbase/io/asyncfs/AsyncFSOutputHelper.java | 5 +++-- .../FanOutOneBlockAsyncDFSOutputHelper.java | 16 ++++++++++------ .../TestFanOutOneBlockAsyncDFSOutput.java | 17 +++++++++-------- .../TestFanOutOneBlockAsyncDFSOutputHang.java | 2 +- .../hbase/io/asyncfs/TestLocalAsyncOutput.java | 2 +- .../TestSaslFanOutOneBlockAsyncDFSOutput.java | 2 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 4 ++++ .../wal/AbstractProtobufLogWriter.java | 8 ++++++-- .../wal/AsyncProtobufLogWriter.java | 4 ++-- .../regionserver/wal/ProtobufLogWriter.java | 11 ++++++++--- 10 files changed, 45 insertions(+), 26 deletions(-) diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java index a530ca4a2a0d..cbb0648f3afb 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java @@ -48,11 +48,12 @@ private AsyncFSOutputHelper() { */ public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, - Class channelClass, StreamSlowMonitor monitor) + Class channelClass, StreamSlowMonitor monitor, boolean noLocalWrite) throws IOException, 
CommonFSUtils.StreamLacksCapabilityException { if (fs instanceof DistributedFileSystem) { return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, - overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); + overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor, + noLocalWrite); } final FSDataOutputStream out; int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 98590173ed2a..d4a71a77a79d 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -445,20 +445,24 @@ public NameNodeException(Throwable cause) { } } - private static EnumSetWritable getCreateFlags(boolean overwrite) { + private static EnumSetWritable getCreateFlags(boolean overwrite, + boolean noLocalWrite) { List flags = new ArrayList<>(); flags.add(CreateFlag.CREATE); if (overwrite) { flags.add(CreateFlag.OVERWRITE); } + if (noLocalWrite) { + flags.add(CreateFlag.NO_LOCAL_WRITE); + } flags.add(CreateFlag.SHOULD_REPLICATE); return new EnumSetWritable<>(EnumSet.copyOf(flags)); } private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, boolean overwrite, boolean createParent, short replication, long blockSize, - EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor) - throws IOException { + EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor, + boolean noLocalWrite) throws IOException { Configuration conf = dfs.getConf(); DFSClient client = dfs.getClient(); String clientName = client.getClientName(); @@ -475,7 +479,7 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d try { stat = FILE_CREATOR.create(namenode, src, FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName, - getCreateFlags(overwrite), createParent, replication, blockSize, + getCreateFlags(overwrite, noLocalWrite), createParent, replication, blockSize, CryptoProtocolVersion.supported()); } catch (Exception e) { if (e instanceof RemoteException) { @@ -561,14 +565,14 @@ public void operationComplete(Future future) throws Exception { public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, Class channelClass, - final StreamSlowMonitor monitor) throws IOException { + final StreamSlowMonitor monitor, boolean noLocalWrite) throws IOException { return new FileSystemLinkResolver() { @Override public FanOutOneBlockAsyncDFSOutput doCall(Path p) throws IOException, UnresolvedLinkException { return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, - blockSize, eventLoopGroup, channelClass, monitor); + blockSize, eventLoopGroup, channelClass, monitor, noLocalWrite); } @Override diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java index 68b8bfa3d9f3..f0910684eddf 100644 --- 
a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java @@ -141,7 +141,7 @@ public void test() throws IOException, InterruptedException, ExecutionException Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, - false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); writeAndVerify(FS, f, out); } @@ -154,7 +154,7 @@ public void test0Recover() throws IOException, InterruptedException, ExecutionEx Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, - false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); byte[] b = new byte[10]; Bytes.random(b); out.write(b, 0, b.length); @@ -183,7 +183,7 @@ public void testHeartbeat() throws IOException, InterruptedException, ExecutionE Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, - false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); Thread.sleep(READ_TIMEOUT_MS * 2); // the connection to datanode should still alive. writeAndVerify(FS, f, out); @@ -198,7 +198,7 @@ public void testCreateParentFailed() throws IOException { EventLoop eventLoop = EVENT_LOOP_GROUP.next(); try { FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, - FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); fail("should fail with parent does not exist"); } catch (RemoteException e) { LOG.info("expected exception caught", e); @@ -220,8 +220,9 @@ public void testConnectToDatanodeFailed() DataNodeProperties dnProp = CLUSTER.stopDataNode(0); Path f = new Path("/test"); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); - try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, - f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR)) { + try (FanOutOneBlockAsyncDFSOutput output = + FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); } finally { @@ -251,7 +252,7 @@ public void testExcludeFailedConnectToDatanode() assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, - FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) { + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor, true)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); assertEquals(1, 
excludeDatanodeManager.getExcludeDNs().size()); @@ -266,7 +267,7 @@ public void testWriteLargeChunk() throws IOException, InterruptedException, Exec Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, - false, (short) 3, 1024 * 1024 * 1024, eventLoop, CHANNEL_CLASS, MONITOR); + false, (short) 3, 1024 * 1024 * 1024, eventLoop, CHANNEL_CLASS, MONITOR, true); byte[] b = new byte[50 * 1024 * 1024]; Bytes.random(b); out.write(b); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java index 77752789dbb3..7f6535a93a93 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java @@ -98,7 +98,7 @@ public static void setUp() throws Exception { Path f = new Path("/testHang"); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); OUT = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 2, - FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); } @AfterClass diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java index d1ce128b118d..4171b60c5b82 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java @@ -65,7 +65,7 @@ public void test() throws IOException, InterruptedException, ExecutionException, Path f = new Path(TEST_UTIL.getDataTestDir(), "test"); FileSystem fs = FileSystem.getLocal(TEST_UTIL.getConfiguration()); AsyncFSOutput out = AsyncFSOutputHelper.createOutput(fs, f, false, true, - fs.getDefaultReplication(f), fs.getDefaultBlockSize(f), GROUP, CHANNEL_CLASS, MONITOR); + fs.getDefaultReplication(f), fs.getDefaultBlockSize(f), GROUP, CHANNEL_CLASS, MONITOR, true); TestFanOutOneBlockAsyncDFSOutput.writeAndVerify(fs, f, out); } } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java index 479b8f4e6034..99048ff2bed1 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java @@ -255,7 +255,7 @@ private Path getEncryptionTestFile() { private void test(Path file) throws IOException, InterruptedException, ExecutionException { EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, file, - true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); + true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true); TestFanOutOneBlockAsyncDFSOutput.writeAndVerify(FS, file, out); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index ef25068512f0..a94d827e8e2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -179,6 +179,10 @@ public abstract class AbstractFSWAL implements WAL { public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; + public static final String WAL_AVOID_LOCAL_WRITES_KEY = + "hbase.regionserver.wal.avoid-local-writes"; + public static final boolean WAL_AVOID_LOCAL_WRITES_DEFAULT = false; + /** * file system instance */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java index 890fb4e444c7..e6463c563a05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver.wal; +import static org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.WAL_AVOID_LOCAL_WRITES_DEFAULT; +import static org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.WAL_AVOID_LOCAL_WRITES_KEY; import static org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufWALReader.DEFAULT_WAL_TRAILER_WARN_SIZE; import static org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufWALReader.PB_WAL_COMPLETE_MAGIC; import static org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufWALReader.PB_WAL_MAGIC; @@ -163,8 +165,10 @@ public void init(FileSystem fs, Path path, Configuration conf, boolean overwrita int bufferSize = CommonFSUtils.getDefaultBufferSize(fs); short replication = (short) conf.getInt("hbase.regionserver.hlog.replication", CommonFSUtils.getDefaultReplication(fs, path)); + boolean noLocalWrite = + conf.getBoolean(WAL_AVOID_LOCAL_WRITES_KEY, WAL_AVOID_LOCAL_WRITES_DEFAULT); - initOutput(fs, path, overwritable, bufferSize, replication, blocksize, monitor); + initOutput(fs, path, overwritable, bufferSize, replication, blocksize, monitor, noLocalWrite); boolean doTagCompress = doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); @@ -253,7 +257,7 @@ protected final void writeWALTrailer() { } protected abstract void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) + short replication, long blockSize, StreamSlowMonitor monitor, boolean noLocalWrite) throws IOException, StreamLacksCapabilityException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index e50a02f6f80d..f10f39222722 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -178,10 +178,10 @@ public AsyncFSOutput getOutput() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) + short replication, long blockSize, StreamSlowMonitor monitor, 
boolean noLocalWrite) throws IOException, StreamLacksCapabilityException { this.output = AsyncFSOutputHelper.createOutput(fs, path, overwritable, false, replication, - blockSize, eventLoopGroup, channelClass, monitor); + blockSize, eventLoopGroup, channelClass, monitor, noLocalWrite); this.asyncOutputWrapper = new OutputStreamWrapper(output); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java index 212788c940ed..52317949cc83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java @@ -100,13 +100,18 @@ public FSDataOutputStream getStream() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) + short replication, long blockSize, StreamSlowMonitor monitor, boolean noLocalWrite) throws IOException, StreamLacksCapabilityException { FSDataOutputStreamBuilder builder = fs.createFile(path).overwrite(overwritable) .bufferSize(bufferSize).replication(replication).blockSize(blockSize); if (builder instanceof DistributedFileSystem.HdfsDataOutputStreamBuilder) { - this.output = - ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder).replicate().build(); + DistributedFileSystem.HdfsDataOutputStreamBuilder dfsBuilder = + (DistributedFileSystem.HdfsDataOutputStreamBuilder) builder; + dfsBuilder.replicate(); + if (noLocalWrite) { + dfsBuilder.noLocalWrite(); + } + this.output = dfsBuilder.build(); } else { this.output = builder.build(); } From beafd332618ec81febaf1fcfb8bb8c216de61164 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 13 Mar 2024 10:36:54 -0700 Subject: [PATCH 288/514] HBASE-28419 Allow Action and Policies of ServerKillingMonkey to be configurable. 
(#5743) Signed-off-by: Nick Dimiduk --- .../chaos/factories/MonkeyConstants.java | 13 +++++ ...erAndDependenciesKillingMonkeyFactory.java | 52 +++++++++++++++---- .../factories/ServerKillingMonkeyFactory.java | 28 +++++++--- 3 files changed, 77 insertions(+), 16 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyConstants.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyConstants.java index fa001e085442..0263a568d965 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyConstants.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyConstants.java @@ -20,6 +20,7 @@ import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.TimeUnit; public interface MonkeyConstants { @@ -45,6 +46,11 @@ public interface MonkeyConstants { String UNBALANCE_WAIT_AFTER_BALANCE_MS = "unbalance.action.wait.after.period"; String UNBALANCE_KILL_META_RS = "unbalance.action.kill.meta.rs"; String DECREASE_HFILE_SIZE_SLEEP_TIME = "decrease.hfile.size.sleep.time"; + String RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME = "restart.random.rs.exception.sleep.time"; + String RESTART_ACTIVE_NAMENODE_SLEEP_TIME = "restart.active.namenode.sleep.time"; + String RESTART_RANDOM_DATANODE_SLEEP_TIME = "restart.random.datanode.sleep.time"; + String RESTART_RANDOM_JOURNALNODE_SLEEP_TIME = "restart.random.journalnode.sleep.time"; + String RESTART_RANDOM_ZKNODE_SLEEP_TIME = "restart.random.zknode.sleep.time"; String GRACEFUL_RESTART_RS_SLEEP_TIME = "graceful.restart.rs.sleep.time"; String ROLLING_BATCH_SUSPEND_RS_SLEEP_TIME = "rolling.batch.suspend.rs.sleep.time"; String ROLLING_BATCH_SUSPEND_RS_RATIO = "rolling.batch.suspend.rs.ratio"; @@ -92,6 +98,13 @@ public interface MonkeyConstants { long DEFAULT_UNBALANCE_WAIT_AFTER_BALANCE_MS = 5 * 1000; boolean DEFAULT_UNBALANCE_KILL_META_RS = true; long DEFAULT_DECREASE_HFILE_SIZE_SLEEP_TIME = 30 * 1000; + + long DEFAULT_RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME = TimeUnit.MILLISECONDS.toMillis(60000); + long DEFAULT_RESTART_ACTIVE_NAMENODE_SLEEP_TIME = TimeUnit.MILLISECONDS.toMillis(60000); + long DEFAULT_RESTART_RANDOM_DATANODE_SLEEP_TIME = TimeUnit.MILLISECONDS.toMillis(60000); + long DEFAULT_RESTART_RANDOM_JOURNALNODE_SLEEP_TIME = TimeUnit.MILLISECONDS.toMillis(60000); + long DEFAULT_RESTART_RANDOM_ZKNODE_SLEEP_TIME = TimeUnit.MILLISECONDS.toMillis(60000); + long DEFAULT_GRACEFUL_RESTART_RS_SLEEP_TIME = 5000; long DEFAULT_ROLLING_BATCH_SUSPEND_RS_SLEEP_TIME = 30 * 1000; float DEFAULT_ROLLING_BATCH_SUSPEND_RS_RATIO = 1.0f; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerAndDependenciesKillingMonkeyFactory.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerAndDependenciesKillingMonkeyFactory.java index 8b3d10c46476..28dce4813148 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerAndDependenciesKillingMonkeyFactory.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerAndDependenciesKillingMonkeyFactory.java @@ -42,9 +42,17 @@ */ public class ServerAndDependenciesKillingMonkeyFactory extends MonkeyFactory { + private long restartRandomRsExceptMetaSleepTime; + private long restartActiveMasterSleepTime; + private long rollingBatchRestartRSSleepTime; + private long restartActiveNameNodeSleepTime; + private long restartRandomDataNodeSleepTime; + private long restartRandomJournalNodeSleepTime; + private long 
restartRandomZKNodeSleepTime; private long gracefulRollingRestartTSSLeepTime; private long rollingBatchSuspendRSSleepTime; private float rollingBatchSuspendtRSRatio; + private long action1Period; @Override public ChaosMonkey build() { @@ -53,15 +61,15 @@ public ChaosMonkey build() { // Destructive actions to mess things around. Cannot run batch restart. // @formatter:off Action[] actions1 = new Action[] { - new RestartRandomRsExceptMetaAction(60000), - new RestartActiveMasterAction(5000), + new RestartRandomRsExceptMetaAction(restartRandomRsExceptMetaSleepTime), + new RestartActiveMasterAction(restartActiveMasterSleepTime), // only allow 2 servers to be dead. - new RollingBatchRestartRsAction(5000, 1.0f, 2, true), + new RollingBatchRestartRsAction(rollingBatchRestartRSSleepTime, 1.0f, 2, true), new ForceBalancerAction(), - new RestartActiveNameNodeAction(60000), - new RestartRandomDataNodeAction(60000), - new RestartRandomJournalNodeAction(60000), - new RestartRandomZKNodeAction(60000), + new RestartActiveNameNodeAction(restartActiveNameNodeSleepTime), + new RestartRandomDataNodeAction(restartRandomDataNodeSleepTime), + new RestartRandomJournalNodeAction(restartRandomJournalNodeSleepTime), + new RestartRandomZKNodeAction(restartRandomZKNodeSleepTime), new GracefulRollingRestartRsAction(gracefulRollingRestartTSSLeepTime), new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime, rollingBatchSuspendtRSRatio) @@ -73,12 +81,33 @@ public ChaosMonkey build() { new Action[] { new DumpClusterStatusAction(), new DumpHdfsClusterStatusAction() }; return new PolicyBasedChaosMonkey(properties, util, - new CompositeSequentialPolicy(new DoActionsOncePolicy(60 * 1000, actions1), - new PeriodicRandomActionPolicy(60 * 1000, actions1)), - new PeriodicRandomActionPolicy(60 * 1000, actions2)); + new CompositeSequentialPolicy(new DoActionsOncePolicy(action1Period, actions1), + new PeriodicRandomActionPolicy(action1Period, actions1)), + new PeriodicRandomActionPolicy(action1Period, actions2)); } private void loadProperties() { + restartRandomRsExceptMetaSleepTime = Long + .parseLong(this.properties.getProperty(MonkeyConstants.RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME + "")); + restartActiveMasterSleepTime = + Long.parseLong(this.properties.getProperty(MonkeyConstants.RESTART_ACTIVE_MASTER_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_ACTIVE_MASTER_SLEEP_TIME + "")); + rollingBatchRestartRSSleepTime = Long + .parseLong(this.properties.getProperty(MonkeyConstants.ROLLING_BATCH_RESTART_RS_SLEEP_TIME, + MonkeyConstants.DEFAULT_ROLLING_BATCH_RESTART_RS_SLEEP_TIME + "")); + restartActiveNameNodeSleepTime = + Long.parseLong(this.properties.getProperty(MonkeyConstants.RESTART_ACTIVE_NAMENODE_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_ACTIVE_NAMENODE_SLEEP_TIME + "")); + restartRandomDataNodeSleepTime = + Long.parseLong(this.properties.getProperty(MonkeyConstants.RESTART_RANDOM_DATANODE_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_RANDOM_DATANODE_SLEEP_TIME + "")); + restartRandomJournalNodeSleepTime = Long + .parseLong(this.properties.getProperty(MonkeyConstants.RESTART_RANDOM_JOURNALNODE_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_RANDOM_JOURNALNODE_SLEEP_TIME + "")); + restartRandomZKNodeSleepTime = + Long.parseLong(this.properties.getProperty(MonkeyConstants.RESTART_RANDOM_ZKNODE_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_RANDOM_ZKNODE_SLEEP_TIME + "")); gracefulRollingRestartTSSLeepTime = 
Long.parseLong(this.properties.getProperty(MonkeyConstants.GRACEFUL_RESTART_RS_SLEEP_TIME, MonkeyConstants.DEFAULT_GRACEFUL_RESTART_RS_SLEEP_TIME + "")); @@ -88,5 +117,8 @@ private void loadProperties() { rollingBatchSuspendtRSRatio = Float.parseFloat(this.properties.getProperty(MonkeyConstants.ROLLING_BATCH_SUSPEND_RS_RATIO, MonkeyConstants.DEFAULT_ROLLING_BATCH_SUSPEND_RS_RATIO + "")); + action1Period = + Long.parseLong(this.properties.getProperty(MonkeyConstants.PERIODIC_ACTION1_PERIOD, + MonkeyConstants.DEFAULT_PERIODIC_ACTION1_PERIOD + "")); } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerKillingMonkeyFactory.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerKillingMonkeyFactory.java index 9d49a1f92933..7b58d217040c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerKillingMonkeyFactory.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/ServerKillingMonkeyFactory.java @@ -37,9 +37,13 @@ */ public class ServerKillingMonkeyFactory extends MonkeyFactory { + private long restartRandomRsExceptMetaSleepTime; + private long restartActiveMasterSleepTime; + private long rollingBatchRestartRSSleepTime; private long gracefulRollingRestartTSSLeepTime; private long rollingBatchSuspendRSSleepTime; private float rollingBatchSuspendtRSRatio; + private long action1Period; @Override public ChaosMonkey build() { @@ -48,10 +52,10 @@ public ChaosMonkey build() { // Destructive actions to mess things around. Cannot run batch restart // @formatter:off Action[] actions1 = new Action[] { - new RestartRandomRsExceptMetaAction(60000), - new RestartActiveMasterAction(5000), + new RestartRandomRsExceptMetaAction(restartRandomRsExceptMetaSleepTime), + new RestartActiveMasterAction(restartActiveMasterSleepTime), // only allow 2 servers to be dead - new RollingBatchRestartRsAction(5000, 1.0f, 2, true), + new RollingBatchRestartRsAction(rollingBatchRestartRSSleepTime, 1.0f, 2, true), new ForceBalancerAction(), new GracefulRollingRestartRsAction(gracefulRollingRestartTSSLeepTime), new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime, @@ -63,12 +67,21 @@ public ChaosMonkey build() { Action[] actions2 = new Action[] { new DumpClusterStatusAction() }; return new PolicyBasedChaosMonkey(properties, util, - new CompositeSequentialPolicy(new DoActionsOncePolicy(60 * 1000, actions1), - new PeriodicRandomActionPolicy(60 * 1000, actions1)), - new PeriodicRandomActionPolicy(60 * 1000, actions2)); + new CompositeSequentialPolicy(new DoActionsOncePolicy(action1Period, actions1), + new PeriodicRandomActionPolicy(action1Period, actions1)), + new PeriodicRandomActionPolicy(action1Period, actions2)); } private void loadProperties() { + restartRandomRsExceptMetaSleepTime = Long + .parseLong(this.properties.getProperty(MonkeyConstants.RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_RANDOM_RS_EXCEPTION_SLEEP_TIME + "")); + restartActiveMasterSleepTime = + Long.parseLong(this.properties.getProperty(MonkeyConstants.RESTART_ACTIVE_MASTER_SLEEP_TIME, + MonkeyConstants.DEFAULT_RESTART_ACTIVE_MASTER_SLEEP_TIME + "")); + rollingBatchRestartRSSleepTime = Long + .parseLong(this.properties.getProperty(MonkeyConstants.ROLLING_BATCH_RESTART_RS_SLEEP_TIME, + MonkeyConstants.DEFAULT_ROLLING_BATCH_RESTART_RS_SLEEP_TIME + "")); gracefulRollingRestartTSSLeepTime = Long.parseLong(this.properties.getProperty(MonkeyConstants.GRACEFUL_RESTART_RS_SLEEP_TIME, 
MonkeyConstants.DEFAULT_GRACEFUL_RESTART_RS_SLEEP_TIME + "")); @@ -78,5 +91,8 @@ private void loadProperties() { rollingBatchSuspendtRSRatio = Float.parseFloat(this.properties.getProperty(MonkeyConstants.ROLLING_BATCH_SUSPEND_RS_RATIO, MonkeyConstants.DEFAULT_ROLLING_BATCH_SUSPEND_RS_RATIO + "")); + action1Period = + Long.parseLong(this.properties.getProperty(MonkeyConstants.PERIODIC_ACTION1_PERIOD, + MonkeyConstants.DEFAULT_PERIODIC_ACTION1_PERIOD + "")); } } From 2984474c8d9942c95af90ee8be186b32c301d938 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 13 Mar 2024 14:33:45 -0700 Subject: [PATCH 289/514] HBASE-28441 Update downloads.xml for 2.5.8 Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index e9e9d97ebaed..039c05c01f38 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -70,26 +70,26 @@ under the License. - 2.5.7 + 2.5.8 - 2023/12/24 + 2024/03/12 - 2.5.7 vs 2.5.6 + 2.5.8 vs 2.5.7 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc)
- hadoop3-bin (sha512 asc)
- hadoop3-client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc)
+ hadoop3-bin (sha512 asc)
+ hadoop3-client-bin (sha512 asc) stable release From 9361ae506a112ccd1525a03da866bf2286931df9 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 13 Mar 2024 17:58:54 -0400 Subject: [PATCH 290/514] HBASE-28385 Improve scan quota estimates when using block bytes scanned (#5713) Signed-off-by: Bryan Beaudreault --- .../hbase/quotas/DefaultOperationQuota.java | 89 ++++++++++-- .../hbase/quotas/ExceedOperationQuota.java | 33 ++++- .../hbase/quotas/NoopOperationQuota.java | 10 +- .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 5 + .../hadoop/hbase/quotas/OperationQuota.java | 20 ++- .../hadoop/hbase/quotas/QuotaLimiter.java | 3 + .../quotas/RegionServerRpcQuotaManager.java | 80 ++++++++--- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 5 + .../hbase/regionserver/RSRpcServices.java | 31 ++++- .../quotas/TestBlockBytesScannedQuota.java | 71 ++++++++-- .../quotas/TestDefaultOperationQuota.java | 128 ++++++++++++++++++ .../hbase/quotas/ThrottleQuotaTestUtil.java | 7 +- 12 files changed, 427 insertions(+), 55 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index a4ff8b2a859e..2e26765a6a19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -27,10 +27,17 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + @InterfaceAudience.Private @InterfaceStability.Evolving public class DefaultOperationQuota implements OperationQuota { + // a single scan estimate can consume no more than this proportion of the limiter's limit + // this prevents a long-running scan from being estimated at, say, 100MB of IO against + // a <100MB/IO throttle (because this would never succeed) + private static final double MAX_SCAN_ESTIMATE_PROPORTIONAL_LIMIT_CONSUMPTION = 0.9; + protected final List limiters; private final long writeCapacityUnit; private final long readCapacityUnit; @@ -53,6 +60,7 @@ public class DefaultOperationQuota implements OperationQuota { protected long readCapacityUnitDiff = 0; private boolean useResultSizeBytes; private long blockSizeBytes; + private long maxScanEstimate; public DefaultOperationQuota(final Configuration conf, final int blockSizeBytes, final QuotaLimiter... 
limiters) { @@ -60,6 +68,9 @@ public DefaultOperationQuota(final Configuration conf, final int blockSizeBytes, this.useResultSizeBytes = conf.getBoolean(OperationQuota.USE_RESULT_SIZE_BYTES, USE_RESULT_SIZE_BYTES_DEFAULT); this.blockSizeBytes = blockSizeBytes; + long readSizeLimit = + Arrays.stream(limiters).mapToLong(QuotaLimiter::getReadLimit).min().orElse(Long.MAX_VALUE); + maxScanEstimate = Math.round(MAX_SCAN_ESTIMATE_PROPORTIONAL_LIMIT_CONSUMPTION * readSizeLimit); } /** @@ -80,21 +91,34 @@ public DefaultOperationQuota(final Configuration conf, final List } @Override - public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThrottlingException { - updateEstimateConsumeQuota(numWrites, numReads, numScans); + public void checkBatchQuota(int numWrites, int numReads) throws RpcThrottlingException { + updateEstimateConsumeBatchQuota(numWrites, numReads); + checkQuota(numWrites, numReads); + } + + @Override + public void checkScanQuota(ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, + long maxBlockBytesScanned, long prevBlockBytesScannedDifference) throws RpcThrottlingException { + updateEstimateConsumeScanQuota(scanRequest, maxScannerResultSize, maxBlockBytesScanned, + prevBlockBytesScannedDifference); + checkQuota(0, 1); + } + private void checkQuota(long numWrites, long numReads) throws RpcThrottlingException { readAvailable = Long.MAX_VALUE; for (final QuotaLimiter limiter : limiters) { - if (limiter.isBypass()) continue; + if (limiter.isBypass()) { + continue; + } - limiter.checkQuota(numWrites, writeConsumed, numReads + numScans, readConsumed, + limiter.checkQuota(numWrites, writeConsumed, numReads, readConsumed, writeCapacityUnitConsumed, readCapacityUnitConsumed); readAvailable = Math.min(readAvailable, limiter.getReadAvailable()); } for (final QuotaLimiter limiter : limiters) { - limiter.grabQuota(numWrites, writeConsumed, numReads + numScans, readConsumed, - writeCapacityUnitConsumed, readCapacityUnitConsumed); + limiter.grabQuota(numWrites, writeConsumed, numReads, readConsumed, writeCapacityUnitConsumed, + readCapacityUnitConsumed); } } @@ -158,24 +182,69 @@ public void addMutation(final Mutation mutation) { * Update estimate quota(read/write size/capacityUnits) which will be consumed * @param numWrites the number of write requests * @param numReads the number of read requests - * @param numScans the number of scan requests */ - protected void updateEstimateConsumeQuota(int numWrites, int numReads, int numScans) { + protected void updateEstimateConsumeBatchQuota(int numWrites, int numReads) { writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100); if (useResultSizeBytes) { readConsumed = estimateConsume(OperationType.GET, numReads, 100); - readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000); } else { // assume 1 block required for reads. this is probably a low estimate, which is okay readConsumed = numReads > 0 ? blockSizeBytes : 0; - readConsumed += numScans > 0 ? 
blockSizeBytes : 0; } writeCapacityUnitConsumed = calculateWriteCapacityUnit(writeConsumed); readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed); } + /** + * Update estimate quota(read/write size/capacityUnits) which will be consumed + * @param scanRequest the scan to be executed + * @param maxScannerResultSize the maximum bytes to be returned by the scanner + * @param maxBlockBytesScanned the maximum bytes scanned in a single RPC call by the + * scanner + * @param prevBlockBytesScannedDifference the difference between BBS of the previous two next + * calls + */ + protected void updateEstimateConsumeScanQuota(ClientProtos.ScanRequest scanRequest, + long maxScannerResultSize, long maxBlockBytesScanned, long prevBlockBytesScannedDifference) { + if (useResultSizeBytes) { + readConsumed = estimateConsume(OperationType.SCAN, 1, 1000); + } else { + long estimate = getScanReadConsumeEstimate(blockSizeBytes, scanRequest.getNextCallSeq(), + maxScannerResultSize, maxBlockBytesScanned, prevBlockBytesScannedDifference); + readConsumed = Math.min(maxScanEstimate, estimate); + } + + readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed); + } + + protected static long getScanReadConsumeEstimate(long blockSizeBytes, long nextCallSeq, + long maxScannerResultSize, long maxBlockBytesScanned, long prevBlockBytesScannedDifference) { + /* + * Estimating scan workload is more complicated, and if we severely underestimate workloads then + * throttled clients will exhaust retries too quickly, and could saturate the RPC layer + */ + if (nextCallSeq == 0) { + // start scanners with an optimistic 1 block IO estimate + // it is better to underestimate a large scan in the beginning + // than to overestimate, and block, a small scan + return blockSizeBytes; + } + + boolean isWorkloadGrowing = prevBlockBytesScannedDifference > blockSizeBytes; + if (isWorkloadGrowing) { + // if nextCallSeq > 0 and the workload is growing then our estimate + // should consider that the workload may continue to increase + return Math.min(maxScannerResultSize, nextCallSeq * maxBlockBytesScanned); + } else { + // if nextCallSeq > 0 and the workload is shrinking or flat + // then our workload has likely plateaued. We can just rely on the existing + // maxBlockBytesScanned as our estimate in this case. + return maxBlockBytesScanned; + } + } + private long estimateConsume(final OperationType type, int numReqs, long avgSize) { if (numReqs > 0) { return avgSize * numReqs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java index 1788e550f22a..3077d6dac537 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ExceedOperationQuota.java @@ -23,6 +23,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /* * Internal class used to check and consume quota if exceed throttle quota is enabled. 
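To make the estimation heuristic above easier to follow, here is a minimal self-contained sketch of the same decision tree (illustrative only, not part of the patch; the class and method names below are invented for the example):

public final class ScanEstimateSketch {

  /** Mirrors the decision tree of getScanReadConsumeEstimate above. */
  static long estimate(long blockSizeBytes, long nextCallSeq, long maxScannerResultSize,
      long maxBlockBytesScanned, long prevBlockBytesScannedDifference) {
    if (nextCallSeq == 0) {
      // brand new scanner: optimistic single-block estimate
      return blockSizeBytes;
    }
    if (prevBlockBytesScannedDifference > blockSizeBytes) {
      // workload still growing: assume it keeps growing, capped by the max scanner result size
      return Math.min(maxScannerResultSize, nextCallSeq * maxBlockBytesScanned);
    }
    // flat or shrinking workload: the largest RPC observed so far is a good estimate
    return maxBlockBytesScanned;
  }

  public static void main(String[] args) {
    long blockSize = 64 * 1024;
    // 5th next() call, largest RPC so far scanned 10 blocks, and the workload has flattened out
    long estimate = estimate(blockSize, 5, 100L * 1024 * 1024, 10 * blockSize, 0);
    // DefaultOperationQuota additionally caps the result at ~90% of the smallest read limit
    // (MAX_SCAN_ESTIMATE_PROPORTIONAL_LIMIT_CONSUMPTION) so one scan can never be estimated
    // above what the throttle could ever admit
    System.out.println(estimate); // prints 655360, i.e. 10 blocks
  }
}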
Exceed * throttle quota means, user can over consume user/namespace/table quota if region server has @@ -47,15 +49,32 @@ public ExceedOperationQuota(final Configuration conf, int blockSizeBytes, } @Override - public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThrottlingException { + public void checkBatchQuota(int numWrites, int numReads) throws RpcThrottlingException { + Runnable estimateQuota = () -> updateEstimateConsumeBatchQuota(numWrites, numReads); + CheckQuotaRunnable checkQuota = () -> super.checkBatchQuota(numWrites, numReads); + checkQuota(estimateQuota, checkQuota, numWrites, numReads, 0); + } + + @Override + public void checkScanQuota(ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, + long maxBlockBytesScanned, long prevBlockBytesScannedDifference) throws RpcThrottlingException { + Runnable estimateQuota = () -> updateEstimateConsumeScanQuota(scanRequest, maxScannerResultSize, + maxBlockBytesScanned, prevBlockBytesScannedDifference); + CheckQuotaRunnable checkQuota = () -> super.checkScanQuota(scanRequest, maxScannerResultSize, + maxBlockBytesScanned, prevBlockBytesScannedDifference); + checkQuota(estimateQuota, checkQuota, 0, 0, 1); + } + + private void checkQuota(Runnable estimateQuota, CheckQuotaRunnable checkQuota, int numWrites, + int numReads, int numScans) throws RpcThrottlingException { if (regionServerLimiter.isBypass()) { // If region server limiter is bypass, which means no region server quota is set, check and // throttle by all other quotas. In this condition, exceed throttle quota will not work. LOG.warn("Exceed throttle quota is enabled but no region server quotas found"); - super.checkQuota(numWrites, numReads, numScans); + checkQuota.run(); } else { // 1. Update estimate quota which will be consumed - updateEstimateConsumeQuota(numWrites, numReads, numScans); + estimateQuota.run(); // 2. Check if region server limiter is enough. If not, throw RpcThrottlingException. regionServerLimiter.checkQuota(numWrites, writeConsumed, numReads + numScans, readConsumed, writeCapacityUnitConsumed, readCapacityUnitConsumed); @@ -63,11 +82,11 @@ public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThro // limiter is enough. 
boolean exceed = false; try { - super.checkQuota(numWrites, numReads, numScans); + checkQuota.run(); } catch (RpcThrottlingException e) { exceed = true; if (LOG.isDebugEnabled()) { - LOG.debug("Read/Write requests num exceeds quota: writes:{} reads:{} scan:{}, " + LOG.debug("Read/Write requests num exceeds quota: writes:{} reads:{}, scans:{}, " + "try use region server quota", numWrites, numReads, numScans); } } @@ -96,4 +115,8 @@ public void close() { regionServerLimiter.consumeRead(readDiff, readCapacityUnitDiff); } } + + private interface CheckQuotaRunnable { + void run() throws RpcThrottlingException; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java index b64429d9adc8..736560e6fd17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java @@ -23,6 +23,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** * Noop operation quota returned when no quota is associated to the user/table */ @@ -40,7 +42,13 @@ public static OperationQuota get() { } @Override - public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThrottlingException { + public void checkBatchQuota(int numWrites, int numReads) throws RpcThrottlingException { + // no-op + } + + @Override + public void checkScanQuota(ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, + long maxBlockBytesScanned, long prevBlockBytesScannedDifference) throws RpcThrottlingException { // no-op } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java index 63d7610115af..cf1e49c12e5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java @@ -70,6 +70,11 @@ public long getReadAvailable() { throw new UnsupportedOperationException(); } + @Override + public long getReadLimit() { + return Long.MAX_VALUE; + } + @Override public String toString() { return "NoopQuotaLimiter"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java index bedad5e98673..ef0a35fa5892 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java @@ -23,6 +23,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** * Interface that allows to check the quota available for an operation. */ @@ -51,11 +53,25 @@ public enum OperationType { * on the number of operations to perform and the average size accumulated during time. * @param numWrites number of write operation that will be performed * @param numReads number of small-read operation that will be performed - * @param numScans number of long-read operation that will be performed * @throws RpcThrottlingException if the operation cannot be performed because RPC quota is * exceeded. 
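Pulled out of ExceedOperationQuota above, the exceed-throttle decision amounts to the following compressed sketch (illustrative only; ThrottledCheck and the parameter names are placeholders standing in for the patch's CheckQuotaRunnable and limiters, not HBase API):

final class ExceedThrottleFlowSketch {

  interface ThrottledCheck {
    void run() throws Exception; // stands in for a check that may throw RpcThrottlingException
  }

  static void check(boolean regionServerLimiterBypass, Runnable estimateConsumption,
      ThrottledCheck regionServerLimiterCheck, ThrottledCheck userTableNamespaceCheck)
      throws Exception {
    if (regionServerLimiterBypass) {
      // no region server quota configured, so there is nothing to "exceed" into
      userTableNamespaceCheck.run();
      return;
    }
    estimateConsumption.run();       // 1. estimate what the request will consume
    regionServerLimiterCheck.run();  // 2. the region server limiter must always have room
    try {
      userTableNamespaceCheck.run(); // 3. the normal user/table/namespace quotas
    } catch (Exception throttled) {
      // 4. exceed is allowed: the request proceeds and is charged to the region server quota
    }
  }
}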
*/ - void checkQuota(int numWrites, int numReads, int numScans) throws RpcThrottlingException; + void checkBatchQuota(int numWrites, int numReads) throws RpcThrottlingException; + + /** + * Checks if it is possible to execute the scan. The quota will be estimated based on the + * composition of the scan. + * @param scanRequest the given scan operation + * @param maxScannerResultSize the maximum bytes to be returned by the scanner + * @param maxBlockBytesScanned the maximum bytes scanned in a single RPC call by the + * scanner + * @param prevBlockBytesScannedDifference the difference between BBS of the previous two next + * calls + * @throws RpcThrottlingException if the operation cannot be performed because RPC quota is + * exceeded. + */ + void checkScanQuota(ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, + long maxBlockBytesScanned, long prevBlockBytesScannedDifference) throws RpcThrottlingException; /** Cleanup method on operation completion */ void close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java index 14326e4e0d25..8d00a702e253 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java @@ -76,6 +76,9 @@ void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize, /** Returns the number of bytes available to read to avoid exceeding the quota */ long getReadAvailable(); + /** Returns the maximum number of bytes ever available to read */ + long getReadLimit(); + /** Returns the number of bytes available to write to avoid exceeding the quota */ long getWriteAvailable(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 3c72c662887b..92a0cfd5c135 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -156,38 +156,82 @@ public OperationQuota getQuota(final UserGroupInformation ugi, final TableName t /** * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the - * available quota and to report the data/usage of the operation. + * available quota and to report the data/usage of the operation. This method is specific to scans + * because estimating a scan's workload is more complicated than estimating the workload of a + * get/put. + * @param region the region where the operation will be performed + * @param scanRequest the scan to be estimated against the quota + * @param maxScannerResultSize the maximum bytes to be returned by the scanner + * @param maxBlockBytesScanned the maximum bytes scanned in a single RPC call by the + * scanner + * @param prevBlockBytesScannedDifference the difference between BBS of the previous two next + * calls + * @return the OperationQuota + * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. 
+ */ + public OperationQuota checkScanQuota(final Region region, + final ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, + long maxBlockBytesScanned, long prevBlockBytesScannedDifference) + throws IOException, RpcThrottlingException { + Optional user = RpcServer.getRequestUser(); + UserGroupInformation ugi; + if (user.isPresent()) { + ugi = user.get().getUGI(); + } else { + ugi = User.getCurrent().getUGI(); + } + TableDescriptor tableDescriptor = region.getTableDescriptor(); + TableName table = tableDescriptor.getTableName(); + + OperationQuota quota = getQuota(ugi, table, region.getMinBlockSizeBytes()); + try { + quota.checkScanQuota(scanRequest, maxScannerResultSize, maxBlockBytesScanned, + prevBlockBytesScannedDifference); + } catch (RpcThrottlingException e) { + LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + " scan=" + + scanRequest.getScannerId() + ": " + e.getMessage()); + throw e; + } + return quota; + } + + /** + * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the + * available quota and to report the data/usage of the operation. This method does not support + * scans because estimating a scan's workload is more complicated than estimating the workload of + * a get/put. * @param region the region where the operation will be performed * @param type the operation type * @return the OperationQuota * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. */ - public OperationQuota checkQuota(final Region region, final OperationQuota.OperationType type) - throws IOException, RpcThrottlingException { + public OperationQuota checkBatchQuota(final Region region, + final OperationQuota.OperationType type) throws IOException, RpcThrottlingException { switch (type) { - case SCAN: - return checkQuota(region, 0, 0, 1); case GET: - return checkQuota(region, 0, 1, 0); + return this.checkBatchQuota(region, 0, 1); case MUTATE: - return checkQuota(region, 1, 0, 0); + return this.checkBatchQuota(region, 1, 0); case CHECK_AND_MUTATE: - return checkQuota(region, 1, 1, 0); + return this.checkBatchQuota(region, 1, 1); } throw new RuntimeException("Invalid operation type: " + type); } /** * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the - * available quota and to report the data/usage of the operation. + * available quota and to report the data/usage of the operation. This method does not support + * scans because estimating a scan's workload is more complicated than estimating the workload of + * a get/put. * @param region the region where the operation will be performed * @param actions the "multi" actions to perform * @param hasCondition whether the RegionAction has a condition * @return the OperationQuota * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. 
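For context, a simplified sketch of how an RPC handler is expected to drive the new scan entry point (illustrative only; the real call site is the scan method in RSRpcServices, and the scan loop and error handling are omitted here):

import java.io.IOException;

import org.apache.hadoop.hbase.quotas.OperationQuota;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
import org.apache.hadoop.hbase.regionserver.Region;

import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

// Illustrative caller-side sketch, not the actual RSRpcServices code.
final class ScanQuotaCallerSketch {
  static void scanWithQuota(RegionServerRpcQuotaManager quotaManager, Region region,
      ClientProtos.ScanRequest request, long maxScannerResultSize, long maxBlockBytesScanned,
      long prevBlockBytesScannedDifference) throws IOException, RpcThrottlingException {
    OperationQuota quota = quotaManager.checkScanQuota(region, request, maxScannerResultSize,
      maxBlockBytesScanned, prevBlockBytesScannedDifference);
    try {
      // ... run the scan and report the sizes actually read back to the quota ...
    } finally {
      quota.close(); // always true up the grabbed quota on completion
    }
  }
}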
*/ - public OperationQuota checkQuota(final Region region, final List actions, - boolean hasCondition) throws IOException, RpcThrottlingException { + public OperationQuota checkBatchQuota(final Region region, + final List actions, boolean hasCondition) + throws IOException, RpcThrottlingException { int numWrites = 0; int numReads = 0; for (final ClientProtos.Action action : actions) { @@ -202,7 +246,7 @@ public OperationQuota checkQuota(final Region region, final List user = RpcServer.getRequestUser(); UserGroupInformation ugi; if (user.isPresent()) { @@ -229,11 +272,10 @@ private OperationQuota checkQuota(final Region region, final int numWrites, fina OperationQuota quota = getQuota(ugi, table, region.getMinBlockSizeBytes()); try { - quota.checkQuota(numWrites, numReads, numScans); + quota.checkBatchQuota(numWrites, numReads); } catch (RpcThrottlingException e) { - LOG.debug( - "Throttling exception for user=" + ugi.getUserName() + " table=" + table + " numWrites=" - + numWrites + " numReads=" + numReads + " numScans=" + numScans + ": " + e.getMessage()); + LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + + " numWrites=" + numWrites + " numReads=" + numReads + ": " + e.getMessage()); throw e; } return quota; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index 8ae2cae01881..483edbcd3a4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -243,6 +243,11 @@ public long getReadAvailable() { return readSizeLimiter.getAvailable(); } + @Override + public long getReadLimit() { + return Math.min(readSizeLimiter.getLimit(), reqSizeLimiter.getLimit()); + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 7043b78c0485..a2b9a93263d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -429,6 +429,9 @@ static final class RegionScannerHolder { private boolean fullRegionScan; private final String clientIPAndPort; private final String userName; + private volatile long maxBlockBytesScanned = 0; + private volatile long prevBlockBytesScanned = 0; + private volatile long prevBlockBytesScannedDifference = 0; RegionScannerHolder(RegionScanner s, HRegion r, RpcCallback closeCallBack, RpcCallback shippedCallback, boolean needCursor, boolean fullRegionScan, @@ -452,6 +455,22 @@ boolean incNextCallSeq(long currentSeq) { return nextCallSeq.compareAndSet(currentSeq, currentSeq + 1); } + long getMaxBlockBytesScanned() { + return maxBlockBytesScanned; + } + + long getPrevBlockBytesScannedDifference() { + return prevBlockBytesScannedDifference; + } + + void updateBlockBytesScanned(long blockBytesScanned) { + prevBlockBytesScannedDifference = blockBytesScanned - prevBlockBytesScanned; + prevBlockBytesScanned = blockBytesScanned; + if (blockBytesScanned > maxBlockBytesScanned) { + maxBlockBytesScanned = blockBytesScanned; + } + } + // Should be called only when we need to print lease expired messages otherwise // cache the String once made. 
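A tiny standalone trace of the tracking above shows how successive per-RPC block-bytes-scanned values turn into the growing/flat signal consumed by the scan estimator (illustrative only; the class name and sample values are invented):

public final class BlockBytesScannedTraceSketch {
  public static void main(String[] args) {
    long blockSize = 64 * 1024;
    long prev = 0;
    long max = 0;
    // block bytes scanned reported by four consecutive scan RPCs
    long[] perRpc = { 64 * 1024, 256 * 1024, 512 * 1024, 512 * 1024 };
    for (long bbs : perRpc) {
      long difference = bbs - prev; // prevBlockBytesScannedDifference
      prev = bbs;
      max = Math.max(max, bbs);     // maxBlockBytesScanned
      boolean growing = difference > blockSize;
      System.out.println("bbs=" + bbs + " diff=" + difference + " max=" + max
        + (growing ? " -> estimator treats workload as growing" : " -> flat/shrinking"));
    }
  }
}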
@Override @@ -2466,7 +2485,7 @@ public GetResponse get(final RpcController controller, final GetRequest request) } Boolean existence = null; Result r = null; - quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.GET); + quota = getRpcQuotaManager().checkBatchQuota(region, OperationQuota.OperationType.GET); Get clientGet = ProtobufUtil.toGet(get); if (get.getExistenceOnly() && region.getCoprocessorHost() != null) { @@ -2683,7 +2702,7 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) try { region = getRegion(regionSpecifier); - quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList(), + quota = getRpcQuotaManager().checkBatchQuota(region, regionAction.getActionList(), regionAction.hasCondition()); } catch (IOException e) { failRegionAction(responseBuilder, regionActionResultBuilder, regionAction, cellScanner, e); @@ -2746,7 +2765,7 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) try { region = getRegion(regionSpecifier); - quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList(), + quota = getRpcQuotaManager().checkBatchQuota(region, regionAction.getActionList(), regionAction.hasCondition()); } catch (IOException e) { failRegionAction(responseBuilder, regionActionResultBuilder, regionAction, cellScanner, e); @@ -2931,7 +2950,7 @@ public MutateResponse mutate(final RpcController rpcc, final MutateRequest reque } long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; OperationQuota.OperationType operationType = QuotaUtil.getQuotaOperationType(request); - quota = getRpcQuotaManager().checkQuota(region, operationType); + quota = getRpcQuotaManager().checkBatchQuota(region, operationType); ActivePolicyEnforcement spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements(); @@ -3487,6 +3506,7 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan if (rpcCall != null) { responseCellSize = rpcCall.getResponseCellSize(); blockBytesScanned = rpcCall.getBlockBytesScanned(); + rsh.updateBlockBytesScanned(blockBytesScanned); } region.getMetrics().updateScan(); final MetricsRegionServer metricsRegionServer = server.getMetrics(); @@ -3590,7 +3610,8 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } OperationQuota quota; try { - quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN); + quota = getRpcQuotaManager().checkScanQuota(region, request, maxScannerResultSize, + rsh.getMaxBlockBytesScanned(), rsh.getPrevBlockBytesScannedDifference()); } catch (IOException e) { addScannerLeaseBack(lease); throw new ServiceException(e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java index 5de9a2d1a900..c058abe214c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestBlockBytesScannedQuota.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doScans; import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.triggerUserCacheRefresh; import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.waitMinuteQuota; +import static org.junit.Assert.assertTrue; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; @@ -60,12 
+61,17 @@ public class TestBlockBytesScannedQuota { private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static final TableName TABLE_NAME = TableName.valueOf("BlockBytesScannedQuotaTest"); + private static final long MAX_SCANNER_RESULT_SIZE = 100 * 1024 * 1024; @BeforeClass public static void setUpBeforeClass() throws Exception { // client should fail fast TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 10); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + TEST_UTIL.getConfiguration().setLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, + MAX_SCANNER_RESULT_SIZE); + TEST_UTIL.getConfiguration().setClass(RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY, + AverageIntervalRateLimiter.class, RateLimiter.class); // quotas enabled, using block bytes scanned TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); @@ -140,27 +146,75 @@ public void testBBSScan() throws Exception { waitMinuteQuota(); // should execute 1 request - testTraffic(() -> doScans(5, table), 1, 0); + testTraffic(() -> doScans(5, table, 1), 1, 0); // Remove all the limits admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); - testTraffic(() -> doScans(100, table), 100, 0); - testTraffic(() -> doScans(100, table), 100, 0); + testTraffic(() -> doScans(100, table, 1), 100, 0); + testTraffic(() -> doScans(100, table, 1), 100, 0); // Add ~3 block/sec limit. This should support >1 scans admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, Math.round(3.1 * blockSize), TimeUnit.SECONDS)); triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // Add 50 block/sec limit. This should support >1 scans + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, + Math.round(50.1 * blockSize), TimeUnit.SECONDS)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // This will produce some throttling exceptions, but all/most should succeed within the timeout + testTraffic(() -> doScans(100, table, 1), 75, 25); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // With large caching, a big scan should succeed + testTraffic(() -> doScans(10_000, table, 10_000), 10_000, 0); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // Remove all the limits + admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + testTraffic(() -> doScans(100, table, 1), 100, 0); + testTraffic(() -> doScans(100, table, 1), 100, 0); + } + + @Test + public void testSmallScanNeverBlockedByLargeEstimate() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final String userName = User.getCurrent().getShortName(); + Table table = admin.getConnection().getTable(TABLE_NAME); - // should execute some requests, but not all - testTraffic(() -> doScans(100, table), 100, 90); + doPuts(10_000, FAMILY, QUALIFIER, table); + TEST_UTIL.flush(TABLE_NAME); + + // Add 99MB/sec limit. + // This should never be blocked, but with a sequence number approaching 10k, without + // other intervention, we would estimate a scan workload approaching 625MB or the + // maxScannerResultSize (both larger than the 90MB limit). 
This test ensures that all + // requests succeed, so the estimate never becomes large enough to cause read downtime + long limit = 99 * 1024 * 1024; + assertTrue(limit <= MAX_SCANNER_RESULT_SIZE); // always true, but protecting against code + // changes + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_SIZE, limit, + TimeUnit.SECONDS)); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); + + // should execute all requests + testTraffic(() -> doScans(10_000, table, 1), 10_000, 0); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); + waitMinuteQuota(); // Remove all the limits admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName)); triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); - testTraffic(() -> doScans(100, table), 100, 0); - testTraffic(() -> doScans(100, table), 100, 0); + testTraffic(() -> doScans(100, table, 1), 100, 0); + testTraffic(() -> doScans(100, table, 1), 100, 0); } @Test @@ -223,9 +277,8 @@ private void testTraffic(Callable trafficCallable, long expectedSuccess, l boolean success = (actualSuccess >= expectedSuccess - marginOfError) && (actualSuccess <= expectedSuccess + marginOfError); if (!success) { - triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); waitMinuteQuota(); - Thread.sleep(15_000L); } return success; }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java new file mode 100644 index 000000000000..4684be02d69d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, SmallTests.class }) +public class TestDefaultOperationQuota { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDefaultOperationQuota.class); + + @Test + public void testScanEstimateNewScanner() { + long blockSize = 64 * 1024; + long nextCallSeq = 0; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 0; + long prevBBSDifference = 0; + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // new scanner should estimate scan read as 1 block + assertEquals(blockSize, estimate); + } + + @Test + public void testScanEstimateSecondNextCall() { + long blockSize = 64 * 1024; + long nextCallSeq = 1; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 10 * blockSize; + long prevBBSDifference = 10 * blockSize; + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // 2nd next call should be estimated at maxBBS + assertEquals(maxBlockBytesScanned, estimate); + } + + @Test + public void testScanEstimateFlatWorkload() { + long blockSize = 64 * 1024; + long nextCallSeq = 100; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 10 * blockSize; + long prevBBSDifference = 0; + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // flat workload should not overestimate + assertEquals(maxBlockBytesScanned, estimate); + } + + @Test + public void testScanEstimateVariableFlatWorkload() { + long blockSize = 64 * 1024; + long nextCallSeq = 1; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 10 * blockSize; + long prevBBSDifference = 0; + for (int i = 0; i < 100; i++) { + long variation = Math.round(Math.random() * blockSize); + if (variation % 2 == 0) { + variation *= -1; + } + // despite +/- <1 block variation, we consider this workload flat + prevBBSDifference = variation; + + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq + i, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // flat workload should not overestimate + assertEquals(maxBlockBytesScanned, estimate); + } + } + + @Test + public void testScanEstimateGrowingWorkload() { + long blockSize = 64 * 1024; + long nextCallSeq = 100; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 20 * blockSize; + long prevBBSDifference = 10 * blockSize; + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // growing workload should overestimate + assertTrue(nextCallSeq * maxBlockBytesScanned == estimate || maxScannerResultSize == estimate); + } + + @Test + public void testScanEstimateShrinkingWorkload() { + long blockSize = 64 * 1024; + long 
nextCallSeq = 100; + long maxScannerResultSize = 100 * 1024 * 1024; + long maxBlockBytesScanned = 20 * blockSize; + long prevBBSDifference = -10 * blockSize; + long estimate = DefaultOperationQuota.getScanReadConsumeEstimate(blockSize, nextCallSeq, + maxScannerResultSize, maxBlockBytesScanned, prevBBSDifference); + + // shrinking workload should only shrink estimate to maxBBS + assertEquals(maxBlockBytesScanned, estimate); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java index ff34c52386bf..8da2989921aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java @@ -152,22 +152,21 @@ static long doMultiGets(int maxOps, int batchSize, int rowCount, byte[] family, return opCount; } - static long doScans(int maxOps, Table table) { + static long doScans(int desiredRows, Table table, int caching) { int count = 0; - int caching = 100; try { Scan scan = new Scan(); scan.setCaching(caching); scan.setCacheBlocks(false); ResultScanner scanner = table.getScanner(scan); - while (count < (maxOps * caching)) { + while (count < desiredRows) { scanner.next(); count += 1; } } catch (IOException e) { LOG.error("scan failed after nRetries=" + count, e); } - return count / caching; + return count; } static void triggerUserCacheRefresh(HBaseTestingUtil testUtil, boolean bypass, From 0c1224cc9a844025d483085cd1a4a9c43d17a26f Mon Sep 17 00:00:00 2001 From: guluo Date: Mon, 18 Mar 2024 21:26:15 +0800 Subject: [PATCH 291/514] HBASE-28427 FNFE related to 'master:store' when moving archived hfiles to global archived dir (#5756) Signed-off-by: Duo Zhang --- .../MasterRegionFlusherAndCompactor.java | 11 +++++-- .../hbase/regionserver/TestHRegion.java | 33 +++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFlusherAndCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFlusherAndCompactor.java index 3d4bfea146e5..c06420ea9920 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFlusherAndCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFlusherAndCompactor.java @@ -142,8 +142,15 @@ private void moveHFileToGlobalArchiveDir() throws IOException { Path globalStoreArchiveDir = HFileArchiveUtil.getStoreArchivePathForArchivePath( globalArchivePath, region.getRegionInfo(), store.getColumnFamilyDescriptor().getName()); try { - MasterRegionUtils.moveFilesUnderDir(fs, storeArchiveDir, globalStoreArchiveDir, - archivedHFileSuffix); + if (fs.exists(storeArchiveDir)) { + MasterRegionUtils.moveFilesUnderDir(fs, storeArchiveDir, globalStoreArchiveDir, + archivedHFileSuffix); + } else { + LOG.warn( + "Archived dir {} does not exist, there is no need to move archived hfiles from {} " + + "to global dir {} .", + storeArchiveDir, storeArchiveDir, globalStoreArchiveDir); + } } catch (IOException e) { LOG.warn("Failed to move archived hfiles from {} to global dir {}", storeArchiveDir, globalStoreArchiveDir, e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index abeec8a095ba..a271920c0150 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -4649,6 +4649,39 @@ public void testWritesWhileScanning() throws IOException, InterruptedException { } } + @Test + public void testCloseAndArchiveCompactedFiles() throws IOException { + byte[] CF1 = Bytes.toBytes("CF1"); + byte[] CF2 = Bytes.toBytes("CF2"); + this.region = initHRegion(tableName, method, CONF, CF1, CF2); + for (int i = 0; i < 2; i++) { + int index = i; + Put put = + new Put(Bytes.toBytes(index)).addColumn(CF1, Bytes.toBytes("q"), Bytes.toBytes(index)); + region.put(put); + region.flush(true); + } + + region.compact(true); + + HStore store1 = region.getStore(CF1); + HStore store2 = region.getStore(CF2); + store1.closeAndArchiveCompactedFiles(); + store2.closeAndArchiveCompactedFiles(); + + int storefilesCount = region.getStores().stream().mapToInt(Store::getStorefilesCount).sum(); + assertTrue(storefilesCount == 1); + + FileSystem fs = region.getRegionFileSystem().getFileSystem(); + Configuration conf = region.getReadOnlyConfiguration(); + RegionInfo regionInfo = region.getRegionInfo(); + Path store1ArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, CF1); + assertTrue(fs.exists(store1ArchiveDir)); + // The archived dir of CF2 does not exist because this column family has no data at all + Path store2ArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, CF2); + assertFalse(fs.exists(store2ArchiveDir)); + } + protected class PutThread extends Thread { private volatile boolean done; private volatile int numPutsFinished = 0; From 0763a740960f7cbb177abd596d9cb203aaf5f025 Mon Sep 17 00:00:00 2001 From: chandrasekhar-188k <154109917+chandrasekhar-188k@users.noreply.github.com> Date: Tue, 19 Mar 2024 18:56:28 +0530 Subject: [PATCH 292/514] HBASE-28124 Missing fields in Scan.toJSON (#5678) Signed-off-by: Pankaj Kumar < pankajkumar@apache.org> Signed-off-by: Rajeshbabu Chintaguntla Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/Scan.java | 31 +++++- .../hbase/client/TestOnlineLogRecord.java | 26 +++-- .../hadoop/hbase/client/TestOperation.java | 98 +++++++++++++++++++ 3 files changed, 144 insertions(+), 11 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index b9adefb40cde..f132459a9c0f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -25,6 +25,7 @@ import java.util.NavigableSet; import java.util.TreeMap; import java.util.TreeSet; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.filter.Filter; @@ -747,7 +748,7 @@ public Map getFingerprint() { */ @Override public Map toMap(int maxCols) { - // start with the fingerpring map and build on top of it + // start with the fingerprint map and build on top of it Map map = getFingerprint(); // map from families to column list replaces fingerprint's list of families Map> familyColumns = new HashMap<>(); @@ -795,6 +796,34 @@ public Map toMap(int maxCols) { if (getId() != null) { map.put("id", getId()); } + map.put("includeStartRow", includeStartRow); + map.put("includeStopRow", includeStopRow); + map.put("allowPartialResults", allowPartialResults); + map.put("storeLimit", storeLimit); + 
map.put("storeOffset", storeOffset); + map.put("reversed", reversed); + if (null != asyncPrefetch) { + map.put("asyncPrefetch", asyncPrefetch); + } + map.put("mvccReadPoint", mvccReadPoint); + map.put("limit", limit); + map.put("readType", readType); + map.put("needCursorResult", needCursorResult); + map.put("targetReplicaId", targetReplicaId); + map.put("consistency", consistency); + if (!colFamTimeRangeMap.isEmpty()) { + Map> colFamTimeRangeMapStr = colFamTimeRangeMap.entrySet().stream() + .collect(Collectors.toMap((e) -> Bytes.toStringBinary(e.getKey()), e -> { + TimeRange value = e.getValue(); + List rangeList = new ArrayList<>(); + rangeList.add(value.getMin()); + rangeList.add(value.getMax()); + return rangeList; + })); + + map.put("colFamTimeRangeMap", colFamTimeRangeMapStr); + } + map.put("priority", getPriority()); return map; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java index a16993d56591..72013b6f294b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOnlineLogRecord.java @@ -44,20 +44,26 @@ public void itSerializesScan() { Scan scan = new Scan(); scan.withStartRow(Bytes.toBytes(123)); scan.withStopRow(Bytes.toBytes(456)); - String expectedOutput = "{\n" + " \"startTime\": 1,\n" + " \"processingTime\": 2,\n" - + " \"queueTime\": 3,\n" + " \"responseSize\": 4,\n" + " \"blockBytesScanned\": 5,\n" - + " \"fsReadTime\": 6,\n" + " \"multiGetsCount\": 6,\n" + " \"multiMutationsCount\": 7,\n" - + " \"scan\": {\n" + " \"startRow\": \"\\\\x00\\\\x00\\\\x00{\",\n" - + " \"stopRow\": \"\\\\x00\\\\x00\\\\x01\\\\xC8\",\n" + " \"batch\": -1,\n" - + " \"cacheBlocks\": true,\n" + " \"totalColumns\": 0,\n" - + " \"maxResultSize\": -1,\n" + " \"families\": {},\n" + " \"caching\": -1,\n" - + " \"maxVersions\": 1,\n" + " \"timeRange\": [\n" + " 0,\n" - + " 9223372036854775807\n" + " ]\n" + " }\n" + "}"; + String expectedOutput = + "{\n" + " \"startTime\": 1,\n" + " \"processingTime\": 2,\n" + " \"queueTime\": 3,\n" + + " \"responseSize\": 4,\n" + " \"blockBytesScanned\": 5,\n" + " \"fsReadTime\": 6,\n" + + " \"multiGetsCount\": 6,\n" + " \"multiMutationsCount\": 7,\n" + " \"scan\": {\n" + + " \"startRow\": \"\\\\x00\\\\x00\\\\x00{\",\n" + " \"targetReplicaId\": -1,\n" + + " \"batch\": -1,\n" + " \"totalColumns\": 0,\n" + " \"maxResultSize\": -1,\n" + + " \"families\": {},\n" + " \"priority\": -1,\n" + " \"caching\": -1,\n" + + " \"includeStopRow\": false,\n" + " \"consistency\": \"STRONG\",\n" + + " \"maxVersions\": 1,\n" + " \"storeOffset\": 0,\n" + " \"mvccReadPoint\": -1,\n" + + " \"includeStartRow\": true,\n" + " \"needCursorResult\": false,\n" + + " \"stopRow\": \"\\\\x00\\\\x00\\\\x01\\\\xC8\",\n" + " \"storeLimit\": -1,\n" + + " \"limit\": -1,\n" + " \"cacheBlocks\": true,\n" + + " \"readType\": \"DEFAULT\",\n" + " \"allowPartialResults\": false,\n" + + " \"reversed\": false,\n" + " \"timeRange\": [\n" + " 0,\n" + + " 9223372036854775807\n" + " ]\n" + " }\n" + "}"; OnlineLogRecord o = new OnlineLogRecord(1, 2, 3, 4, 5, 6, null, null, null, null, null, null, null, 6, 7, 0, scan, Collections.emptyMap(), Collections.emptyMap()); String actualOutput = o.toJsonPrettyPrint(); System.out.println(actualOutput); - Assert.assertEquals(actualOutput, expectedOutput); + Assert.assertEquals(expectedOutput, actualOutput); } @Test diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 4e0152a59a3d..0bbe8399914e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -69,6 +69,9 @@ import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder; +import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy; +import org.apache.hbase.thirdparty.com.google.gson.ToNumberPolicy; /** * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans, @@ -345,6 +348,101 @@ public void testOperationJSON() throws IOException { kvMap.get("qualifier")); } + /** + * Test the client Scan Operations' JSON encoding to ensure that produced JSON is parseable and + * that the details are present and not corrupted. + * @throws IOException if the JSON conversion fails + */ + @Test + public void testScanOperationToJSON() throws IOException { + // produce a Scan Operation + Scan scan = new Scan().withStartRow(ROW, true); + scan.addColumn(FAMILY, QUALIFIER); + scan.withStopRow(ROW, true); + scan.readVersions(5); + scan.setBatch(10); + scan.setAllowPartialResults(true); + scan.setMaxResultsPerColumnFamily(3); + scan.setRowOffsetPerColumnFamily(8); + scan.setCaching(20); + scan.setMaxResultSize(50); + scan.setCacheBlocks(true); + scan.setReversed(true); + scan.setTimeRange(1000, 2000); + scan.setAsyncPrefetch(true); + scan.setMvccReadPoint(123); + scan.setLimit(5); + scan.setReadType(Scan.ReadType.PREAD); + scan.setNeedCursorResult(true); + scan.setFilter(SCV_FILTER); + scan.setReplicaId(1); + scan.setConsistency(Consistency.STRONG); + scan.setLoadColumnFamiliesOnDemand(true); + scan.setColumnFamilyTimeRange(FAMILY, 2000, 3000); + scan.setPriority(10); + + // get its JSON representation, and parse it + String json = scan.toJSON(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); + Gson gson = new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING) + .setObjectToNumberStrategy(ToNumberPolicy.LONG_OR_DOUBLE).create(); + Map parsedJSON = gson.fromJson(json, typeOfHashMap); + // check for the row + assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("startRow")); + // check for the family and the qualifier. 
+ List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Scan.toJSON()", familyInfo); + assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); + assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); + assertEquals("stopRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("stopRow")); + assertEquals("includeStartRow incorrect in Scan.toJSON()", true, + parsedJSON.get("includeStartRow")); + assertEquals("includeStopRow incorrect in Scan.toJSON()", true, + parsedJSON.get("includeStopRow")); + assertEquals("maxVersions incorrect in Scan.toJSON()", 5L, parsedJSON.get("maxVersions")); + assertEquals("batch incorrect in Scan.toJSON()", 10L, parsedJSON.get("batch")); + assertEquals("allowPartialResults incorrect in Scan.toJSON()", true, + parsedJSON.get("allowPartialResults")); + assertEquals("storeLimit incorrect in Scan.toJSON()", 3L, parsedJSON.get("storeLimit")); + assertEquals("storeOffset incorrect in Scan.toJSON()", 8L, parsedJSON.get("storeOffset")); + assertEquals("caching incorrect in Scan.toJSON()", 20L, parsedJSON.get("caching")); + assertEquals("maxResultSize incorrect in Scan.toJSON()", "50", parsedJSON.get("maxResultSize")); + assertEquals("cacheBlocks incorrect in Scan.toJSON()", true, parsedJSON.get("cacheBlocks")); + assertEquals("reversed incorrect in Scan.toJSON()", true, parsedJSON.get("reversed")); + List trList = (List) parsedJSON.get("timeRange"); + assertEquals("timeRange incorrect in Scan.toJSON()", 2, trList.size()); + assertEquals("timeRange incorrect in Scan.toJSON()", "1000", trList.get(0)); + assertEquals("timeRange incorrect in Scan.toJSON()", "2000", trList.get(1)); + + assertEquals("asyncPrefetch incorrect in Scan.toJSON()", true, parsedJSON.get("asyncPrefetch")); + assertEquals("mvccReadPoint incorrect in Scan.toJSON()", "123", + parsedJSON.get("mvccReadPoint")); + assertEquals("limit incorrect in Scan.toJSON()", 5L, parsedJSON.get("limit")); + assertEquals("readType incorrect in Scan.toJSON()", "PREAD", parsedJSON.get("readType")); + assertEquals("needCursorResult incorrect in Scan.toJSON()", true, + parsedJSON.get("needCursorResult")); + + Map colFamTimeRange = (Map) parsedJSON.get("colFamTimeRangeMap"); + assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", 1L, colFamTimeRange.size()); + List testFamily = (List) colFamTimeRange.get("testFamily"); + assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", 2L, testFamily.size()); + assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", "2000", testFamily.get(0)); + assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", "3000", testFamily.get(1)); + + assertEquals("targetReplicaId incorrect in Scan.toJSON()", 1L, + parsedJSON.get("targetReplicaId")); + assertEquals("consistency incorrect in Scan.toJSON()", "STRONG", parsedJSON.get("consistency")); + assertEquals("loadColumnFamiliesOnDemand incorrect in Scan.toJSON()", true, + parsedJSON.get("loadColumnFamiliesOnDemand")); + + assertEquals("priority incorrect in Scan.toJSON()", 10L, parsedJSON.get("priority")); + + } + @Test public void testPutCreationWithByteBuffer() { Put p = new Put(ROW); From ade6ab2148d93f5003df9f44c1c7bb38d489e081 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 21 Mar 2024 11:31:08 +0800 Subject: [PATCH 293/514] HBASE-28444 Bump org.apache.zookeeper:zookeeper from 3.8.3 to 3.8.4 (#5765) Signed-off-by: Bryan Beaudreault Signed-off-by: Andrew 
Purtell --- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 6 +- .../hadoop/hbase/zookeeper/TestZKUtil.java | 131 +++++++++--------- pom.xml | 2 +- 3 files changed, 66 insertions(+), 73 deletions(-) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index be8f25cc39b0..b0a99647fb96 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -772,8 +772,8 @@ private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) throws KeeperException { CreateRequest create = (CreateRequest) toZooKeeperOp(zkw, cafs).toRequestRecord(); String znode = create.getPath(); + RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); try { - RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); if (zk.exists(znode, false) == null) { zk.create(znode, create.getData(), create.getAcl(), CreateMode.fromFlag(create.getFlags())); } @@ -781,9 +781,9 @@ private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) // pass } catch (KeeperException.NoAuthException nee) { try { - if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { + if (zk.exists(znode, false) == null) { // If we failed to create the file and it does not already exist. - throw (nee); + throw nee; } } catch (InterruptedException ie) { zkw.interruptedException(ie); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java index 9572fee049f1..f30cf15bec92 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java @@ -22,9 +22,22 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; +import java.util.Arrays; import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -45,6 +58,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.mockito.AdditionalAnswers; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -139,31 +153,10 @@ public void testSetDataWithVersion() throws Exception { assertEquals(2, v2); } - /** - * A test for HBASE-3238 - * @throws IOException A connection attempt to zk failed - * @throws InterruptedException One of the non ZKUtil actions was interrupted - * @throws KeeperException Any of the zookeeper connections had a KeeperException - */ - @Test - public void testCreateSilentIsReallySilent() - throws InterruptedException, KeeperException, IOException { - Configuration c = UTIL.getConfiguration(); - - String 
aclZnode = "/aclRoot"; - String quorumServers = ZKConfig.getZKQuorumServersString(c); - int sessionTimeout = 5 * 1000; // 5 seconds - ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance); - zk.addAuthInfo("digest", Bytes.toBytes("hbase:rox")); - - // Save the previous ACL - Stat s; - List oldACL; - while (true) { + private V callAndIgnoreTransientError(Callable action) throws Exception { + for (;;) { try { - s = new Stat(); - oldACL = zk.getACL("/", s); - break; + return action.call(); } catch (KeeperException e) { switch (e.code()) { case CONNECTIONLOSS: @@ -177,54 +170,54 @@ public void testCreateSilentIsReallySilent() } } } + } - // I set this acl after the attempted creation of the cluster home node. - // Add retries in case of retryable zk exceptions. - while (true) { - try { - zk.setACL("/", ZooDefs.Ids.CREATOR_ALL_ACL, -1); - break; - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - LOG.warn("Possibly transient ZooKeeper exception: " + e); - Threads.sleep(100); - break; - default: - throw e; - } - } - } + /** + * A test for HBASE-3238 + */ + @Test + public void testCreateSilentIsReallySilent() throws Exception { + Configuration c = UTIL.getConfiguration(); - while (true) { - try { - zk.create(aclZnode, null, ZooDefs.Ids.CREATOR_ALL_ACL, CreateMode.PERSISTENT); - break; - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - LOG.warn("Possibly transient ZooKeeper exception: " + e); - Threads.sleep(100); - break; - default: - throw e; + String aclZnode = "/aclRoot"; + String quorumServers = ZKConfig.getZKQuorumServersString(c); + int sessionTimeout = 5 * 1000; // 5 seconds + try (ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance)) { + zk.addAuthInfo("digest", Bytes.toBytes("hbase:rox")); + + // Save the previous ACL + List oldACL = callAndIgnoreTransientError(() -> zk.getACL("/", new Stat())); + + // I set this acl after the attempted creation of the cluster home node. + // Add retries in case of retryable zk exceptions. 
+ callAndIgnoreTransientError(() -> zk.setACL("/", ZooDefs.Ids.CREATOR_ALL_ACL, -1)); + + ZKWatcher watcher = spy(ZKW); + RecoverableZooKeeper rzk = mock(RecoverableZooKeeper.class, + AdditionalAnswers.delegatesTo(ZKW.getRecoverableZooKeeper())); + when(watcher.getRecoverableZooKeeper()).thenReturn(rzk); + AtomicBoolean firstExists = new AtomicBoolean(true); + doAnswer(inv -> { + String path = inv.getArgument(0); + boolean watch = inv.getArgument(1); + Stat stat = ZKW.getRecoverableZooKeeper().exists(path, watch); + // create the znode after first exists check, this is to simulate that we enter the create + // branch but we have no permission for creation, but the znode has been created by others + if (firstExists.compareAndSet(true, false)) { + callAndIgnoreTransientError(() -> zk.create(aclZnode, null, + Arrays.asList(new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE), + new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS)), + CreateMode.PERSISTENT)); } - } - } - zk.close(); - ZKUtil.createAndFailSilent(ZKW, aclZnode); - - // Restore the ACL - ZooKeeper zk3 = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance); - zk3.addAuthInfo("digest", Bytes.toBytes("hbase:rox")); - try { - zk3.setACL("/", oldACL, -1); - } finally { - zk3.close(); + return stat; + }).when(rzk).exists(any(), anyBoolean()); + ZKUtil.createAndFailSilent(watcher, aclZnode); + // make sure we call the exists method twice and create once + verify(rzk, times(2)).exists(any(), anyBoolean()); + verify(rzk).create(anyString(), any(), anyList(), any()); + // Restore the ACL + zk.addAuthInfo("digest", Bytes.toBytes("hbase:rox")); + zk.setACL("/", oldACL, -1); } } diff --git a/pom.xml b/pom.xml index 2340f0bbb3ca..9e1a4bdbe214 100644 --- a/pom.xml +++ b/pom.xml @@ -861,7 +861,7 @@ 0.6.1 thrift 0.14.1 - 3.8.3 + 3.8.4 2.11 1.7.30 4.0.3 From f934af864e79a237113d4f61757ad7a97e04d8ce Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 25 Mar 2024 15:06:11 -0400 Subject: [PATCH 294/514] HBASE-28453 FixedIntervalRateLimiter support for a shorter refill interval (#5773) Signed-off-by: Bryan Beaudreault --- .../quotas/FixedIntervalRateLimiter.java | 49 ++++++++++++- .../hadoop/hbase/quotas/RateLimiter.java | 6 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 20 +++--- .../hadoop/hbase/quotas/TestRateLimiter.java | 69 +++++++++++++++++++ 4 files changed, 130 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java index a717305b8c0a..c5b2fc7f5d83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java @@ -21,26 +21,65 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** * With this limiter resources will be refilled only after a fixed interval of time. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class FixedIntervalRateLimiter extends RateLimiter { + + /** + * The FixedIntervalRateLimiter can be harsh from a latency/backoff perspective, which makes it + * difficult to fully and consistently utilize a quota allowance. 
By configuring the + * {@link #RATE_LIMITER_REFILL_INTERVAL_MS} to a lower value you will encourage the rate limiter + * to throw smaller wait intervals for requests which may be fulfilled in timeframes shorter than + * the quota's full interval. For example, if you're saturating a 100MB/sec read IO quota with a + * ton of tiny gets, then configuring this to a value like 100ms will ensure that your retry + * backoffs approach ~100ms, rather than 1sec. Be careful not to configure this too low, or you + * may produce a dangerous amount of retry volume. + */ + public static final String RATE_LIMITER_REFILL_INTERVAL_MS = + "hbase.quota.rate.limiter.refill.interval.ms"; + private long nextRefillTime = -1L; + private final long refillInterval; + + public FixedIntervalRateLimiter() { + this(DEFAULT_TIME_UNIT); + } + + public FixedIntervalRateLimiter(long refillInterval) { + super(); + Preconditions.checkArgument(getTimeUnitInMillis() >= refillInterval, + String.format("Refill interval %s must be less than or equal to TimeUnit millis %s", + refillInterval, getTimeUnitInMillis())); + this.refillInterval = refillInterval; + } @Override public long refill(long limit) { final long now = EnvironmentEdgeManager.currentTime(); + if (nextRefillTime == -1) { + nextRefillTime = now + refillInterval; + return limit; + } if (now < nextRefillTime) { return 0; } - nextRefillTime = now + super.getTimeUnitInMillis(); - return limit; + long diff = refillInterval + now - nextRefillTime; + long refills = diff / refillInterval; + nextRefillTime = now + refillInterval; + long refillAmount = refills * getRefillIntervalAdjustedLimit(limit); + return Math.min(limit, refillAmount); } @Override public long getWaitInterval(long limit, long available, long amount) { + // adjust the limit based on the refill interval + limit = getRefillIntervalAdjustedLimit(limit); + if (nextRefillTime == -1) { return 0; } @@ -62,7 +101,11 @@ public long getWaitInterval(long limit, long available, long amount) { if (diff % limit == 0) { extraRefillsNecessary--; } - return nextRefillInterval + (extraRefillsNecessary * super.getTimeUnitInMillis()); + return nextRefillInterval + (extraRefillsNecessary * refillInterval); + } + + private long getRefillIntervalAdjustedLimit(long limit) { + return (long) Math.ceil(refillInterval / (double) getTimeUnitInMillis() * limit); } // This method is for strictly testing purpose only diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java index 5c69ad5d6cd5..9474fdf8a028 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java @@ -35,7 +35,9 @@ + "are mostly synchronized...but to me it looks like they are totally synchronized") public abstract class RateLimiter { public static final String QUOTA_RATE_LIMITER_CONF_KEY = "hbase.quota.rate.limiter"; - private long tunit = 1000; // Timeunit factor for translating to ms. + public static final long DEFAULT_TIME_UNIT = 1000; + + private long tunit = DEFAULT_TIME_UNIT; // Timeunit factor for translating to ms. private long limit = Long.MAX_VALUE; // The max value available resource units can be refilled to. 
private long avail = Long.MAX_VALUE; // Currently available resource units @@ -157,7 +159,7 @@ public synchronized long getWaitIntervalMs(final long amount) { * @param amount the number of required resources, a non-negative number * @return true if there are enough available resources, otherwise false */ - private boolean isAvailable(final long amount) { + protected boolean isAvailable(final long amount) { if (isBypass()) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index 483edbcd3a4f..e6e143343f72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -49,15 +49,17 @@ private TimeBasedLimiter() { conf.getClass(RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY, AverageIntervalRateLimiter.class) .getName()) ) { - reqsLimiter = new FixedIntervalRateLimiter(); - reqSizeLimiter = new FixedIntervalRateLimiter(); - writeReqsLimiter = new FixedIntervalRateLimiter(); - writeSizeLimiter = new FixedIntervalRateLimiter(); - readReqsLimiter = new FixedIntervalRateLimiter(); - readSizeLimiter = new FixedIntervalRateLimiter(); - reqCapacityUnitLimiter = new FixedIntervalRateLimiter(); - writeCapacityUnitLimiter = new FixedIntervalRateLimiter(); - readCapacityUnitLimiter = new FixedIntervalRateLimiter(); + long refillInterval = conf.getLong(FixedIntervalRateLimiter.RATE_LIMITER_REFILL_INTERVAL_MS, + RateLimiter.DEFAULT_TIME_UNIT); + reqsLimiter = new FixedIntervalRateLimiter(refillInterval); + reqSizeLimiter = new FixedIntervalRateLimiter(refillInterval); + writeReqsLimiter = new FixedIntervalRateLimiter(refillInterval); + writeSizeLimiter = new FixedIntervalRateLimiter(refillInterval); + readReqsLimiter = new FixedIntervalRateLimiter(refillInterval); + readSizeLimiter = new FixedIntervalRateLimiter(refillInterval); + reqCapacityUnitLimiter = new FixedIntervalRateLimiter(refillInterval); + writeCapacityUnitLimiter = new FixedIntervalRateLimiter(refillInterval); + readCapacityUnitLimiter = new FixedIntervalRateLimiter(refillInterval); } else { reqsLimiter = new AverageIntervalRateLimiter(); reqSizeLimiter = new AverageIntervalRateLimiter(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java index ae9b96d7a6c7..721c0df8525f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -427,4 +429,71 @@ public void testLimiterCompensationOverflow() throws InterruptedException { avgLimiter.consume(-80); assertEquals(limit, avgLimiter.getAvailable()); } + + @Test + public void itRunsFullWithPartialRefillInterval() { + RateLimiter limiter = new FixedIntervalRateLimiter(100); + limiter.set(10, TimeUnit.SECONDS); + assertEquals(0, limiter.getWaitIntervalMs()); + + // Consume the quota + limiter.consume(10); + + // Need to wait 1s to acquire another resource + long waitInterval = 
limiter.waitInterval(10); + assertTrue(900 < waitInterval); + assertTrue(1000 >= waitInterval); + // We need to wait 2s to acquire more than 10 resources + waitInterval = limiter.waitInterval(20); + assertTrue(1900 < waitInterval); + assertTrue(2000 >= waitInterval); + + limiter.setNextRefillTime(limiter.getNextRefillTime() - 1000); + // We've waited the full interval, so we should now have 10 + assertEquals(0, limiter.getWaitIntervalMs(10)); + assertEquals(0, limiter.waitInterval()); + } + + @Test + public void itRunsPartialRefillIntervals() { + RateLimiter limiter = new FixedIntervalRateLimiter(100); + limiter.set(10, TimeUnit.SECONDS); + assertEquals(0, limiter.getWaitIntervalMs()); + + // Consume the quota + limiter.consume(10); + + // Need to wait 1s to acquire another resource + long waitInterval = limiter.waitInterval(10); + assertTrue(900 < waitInterval); + assertTrue(1000 >= waitInterval); + // We need to wait 2s to acquire more than 10 resources + waitInterval = limiter.waitInterval(20); + assertTrue(1900 < waitInterval); + assertTrue(2000 >= waitInterval); + // We need to wait 0<=x<=100ms to acquire 1 resource + waitInterval = limiter.waitInterval(1); + assertTrue(0 < waitInterval); + assertTrue(100 >= waitInterval); + + limiter.setNextRefillTime(limiter.getNextRefillTime() - 500); + // We've waited half the interval, so we should now have half available + assertEquals(0, limiter.getWaitIntervalMs(5)); + assertEquals(0, limiter.waitInterval()); + } + + @Test + public void itRunsRepeatedPartialRefillIntervals() { + RateLimiter limiter = new FixedIntervalRateLimiter(100); + limiter.set(10, TimeUnit.SECONDS); + assertEquals(0, limiter.getWaitIntervalMs()); + // Consume the quota + limiter.consume(10); + for (int i = 0; i < 100; i++) { + limiter.setNextRefillTime(limiter.getNextRefillTime() - 100); // free 1 resource + limiter.consume(1); + assertFalse(limiter.isAvailable(1)); // all resources consumed + assertTrue(limiter.isAvailable(0)); // not negative + } + } } From 38aef80ffb66bb103acf201a818dfd5ca8bde090 Mon Sep 17 00:00:00 2001 From: Bri Augenreich Date: Mon, 25 Mar 2024 15:07:31 -0400 Subject: [PATCH 295/514] HBASE-28449 Fix backupSystemTable prefix scans (#5768) Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/backup/impl/BackupSystemTable.java | 12 ++---------- .../hadoop/hbase/backup/TestBackupSystemTable.java | 9 +++++++++ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 55f225f41cf1..682757dbc404 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1438,11 +1438,7 @@ private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] sm */ private Scan createScanForReadLogTimestampMap(String backupRoot) { Scan scan = new Scan(); - byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.withStartRow(startRow); - scan.withStopRow(stopRow); + scan.setStartStopRowForPrefixScan(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL)); scan.addFamily(BackupSystemTable.META_FAMILY); return scan; @@ -1479,11 +1475,7 @@ private Put createPutForRegionServerLastLogRollResult(String server, Long 
timest */ private Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) { Scan scan = new Scan(); - byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.withStartRow(startRow); - scan.withStopRow(stopRow); + scan.setStartStopRowForPrefixScan(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL)); scan.addFamily(BackupSystemTable.META_FAMILY); scan.readVersions(1); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index 21883fa6eaad..51e266032cea 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -190,8 +190,11 @@ public void testRegionServerLastLogRollResults() throws IOException { String[] servers = new String[] { "server1", "server2", "server3" }; Long[] timestamps = new Long[] { 100L, 102L, 107L }; + // validate the prefix scan in readRegionServerlastLogRollResult will get the right timestamps + // when a backup root with the same prefix is present for (int i = 0; i < servers.length; i++) { table.writeRegionServerLastLogRollResult(servers[i], timestamps[i], "root"); + table.writeRegionServerLastLogRollResult(servers[i], timestamps[i], "root/backup"); } HashMap result = table.readRegionServerLastLogRollResult("root"); @@ -265,7 +268,10 @@ public void testRegionServerLogTimestampMap() throws IOException { rsTimestampMap.put("rs2:100", 101L); rsTimestampMap.put("rs3:100", 103L); + // validate the prefix scan in readLogTimestampMap will get the right timestamps + // when a backup root with the same prefix is present table.writeRegionServerLogTimestamp(tables, rsTimestampMap, "root"); + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, "root/backup"); Map> result = table.readLogTimestampMap("root"); @@ -291,7 +297,10 @@ public void testRegionServerLogTimestampMap() throws IOException { rsTimestampMap1.put("rs2:100", 201L); rsTimestampMap1.put("rs3:100", 203L); + // validate the prefix scan in readLogTimestampMap will get the right timestamps + // when a backup root with the same prefix is present table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1, "root"); + table.writeRegionServerLogTimestamp(tables1, rsTimestampMap, "root/backup"); result = table.readLogTimestampMap("root"); From feef7446f28613d22323a7b992e0c363cac589ee Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 26 Mar 2024 10:31:15 -0400 Subject: [PATCH 296/514] HBASE-28456 HBase Restore restores old data if data for the same timestamp is in different hfiles (#5775) Signed-off-by: Nick Dimiduk Signed-off-by: Duo Zhang --- .../mapreduce/MapReduceHFileSplitterJob.java | 4 + .../TestBackupRestoreWithModifications.java | 276 ++++++++++++++++++ .../hbase/mapreduce/HFileInputFormat.java | 8 + .../hbase/mapreduce/HFileOutputFormat2.java | 6 +- .../hbase/regionserver/HRegionFileSystem.java | 2 +- .../hadoop/hbase/regionserver/HStoreFile.java | 22 +- .../hbase/regionserver/StoreFileInfo.java | 49 ++++ 7 files changed, 348 insertions(+), 19 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index 766a99d778b8..755b0a41e32c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -99,6 +99,10 @@ public Job createSubmittableJob(String[] args) throws IOException { conf.set(FileInputFormat.INPUT_DIR, inputDirs); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); + // MapReduceHFileSplitter needs ExtendedCellSerialization so that sequenceId can be propagated + // when sorting cells in CellSortReducer + job.getConfiguration().setBoolean(HFileOutputFormat2.EXTENDED_CELL_SERIALIZATION_ENABLED_KEY, + true); job.setJarByClass(MapReduceHFileSplitterJob.class); job.setInputFormatClass(HFileInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java new file mode 100644 index 000000000000..d01df687edac --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE; +import static org.apache.hadoop.hbase.backup.BackupType.FULL; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testing.TestingHBaseCluster; +import org.apache.hadoop.hbase.testing.TestingHBaseClusterOption; +import org.apache.hadoop.hbase.tool.BulkLoadHFiles; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category(MediumTests.class) +@RunWith(Parameterized.class) +public class TestBackupRestoreWithModifications { + + private static final Logger LOG = + LoggerFactory.getLogger(TestBackupRestoreWithModifications.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBackupRestoreWithModifications.class); + + @Parameterized.Parameters(name = "{index}: useBulkLoad={0}") + public static Iterable data() { + return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; + } + + @Parameterized.Parameter(0) + public boolean useBulkLoad; + + private TableName sourceTable; + private TableName targetTable; + + private List allTables; + private static TestingHBaseCluster cluster; + private static final Path BACKUP_ROOT_DIR = new Path("backupIT"); + private static final byte[] COLUMN_FAMILY = Bytes.toBytes("0"); + + @BeforeClass + public static void beforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + enableBackup(conf); + cluster = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().conf(conf).build()); + cluster.start(); + } + + @AfterClass + public static void afterClass() throws Exception { + cluster.stop(); + } + + @Before + public void setUp() throws Exception { + sourceTable = 
TableName.valueOf("table-" + useBulkLoad); + targetTable = TableName.valueOf("another-table-" + useBulkLoad); + allTables = Arrays.asList(sourceTable, targetTable); + createTable(sourceTable); + createTable(targetTable); + } + + @Test + public void testModificationsOnTable() throws Exception { + Instant timestamp = Instant.now(); + + // load some data + load(sourceTable, timestamp, "data"); + + String backupId = backup(FULL, allTables); + BackupInfo backupInfo = verifyBackup(backupId, FULL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + restore(backupId, sourceTable, targetTable); + validateDataEquals(sourceTable, "data"); + validateDataEquals(targetTable, "data"); + + // load new data on the same timestamp + load(sourceTable, timestamp, "changed_data"); + + backupId = backup(FULL, allTables); + backupInfo = verifyBackup(backupId, FULL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + restore(backupId, sourceTable, targetTable); + validateDataEquals(sourceTable, "changed_data"); + validateDataEquals(targetTable, "changed_data"); + } + + private void createTable(TableName tableName) throws IOException { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY)); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Admin admin = connection.getAdmin()) { + admin.createTable(builder.build()); + } + } + + private void load(TableName tableName, Instant timestamp, String data) throws IOException { + if (useBulkLoad) { + hFileBulkLoad(tableName, timestamp, data); + } else { + putLoad(tableName, timestamp, data); + } + } + + private void putLoad(TableName tableName, Instant timestamp, String data) throws IOException { + LOG.info("Writing new data to HBase using normal Puts: {}", data); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf())) { + Table table = connection.getTable(sourceTable); + List puts = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Put put = new Put(Bytes.toBytes(i), timestamp.toEpochMilli()); + put.addColumn(COLUMN_FAMILY, Bytes.toBytes("data"), Bytes.toBytes(data)); + puts.add(put); + + if (i % 100 == 0) { + table.put(puts); + puts.clear(); + } + } + if (!puts.isEmpty()) { + table.put(puts); + } + connection.getAdmin().flush(tableName); + } + } + + private void hFileBulkLoad(TableName tableName, Instant timestamp, String data) + throws IOException { + FileSystem fs = FileSystem.get(cluster.getConf()); + LOG.info("Writing new data to HBase using BulkLoad: {}", data); + // HFiles require this strict directory structure to allow to load them + Path hFileRootPath = new Path("/tmp/hfiles_" + UUID.randomUUID()); + fs.mkdirs(hFileRootPath); + Path hFileFamilyPath = new Path(hFileRootPath, Bytes.toString(COLUMN_FAMILY)); + fs.mkdirs(hFileFamilyPath); + try (HFile.Writer writer = HFile.getWriterFactoryNoCache(cluster.getConf()) + .withPath(fs, new Path(hFileFamilyPath, "hfile_" + UUID.randomUUID())) + .withFileContext(new HFileContextBuilder().withTableName(tableName.toBytes()) + .withColumnFamily(COLUMN_FAMILY).build()) + .create()) { + for (int i = 0; i < 10; i++) { + writer.append(new KeyValue(Bytes.toBytes(i), COLUMN_FAMILY, Bytes.toBytes("data"), + timestamp.toEpochMilli(), Bytes.toBytes(data))); + } + } + Map result = + BulkLoadHFiles.create(cluster.getConf()).bulkLoad(tableName, hFileRootPath); + assertFalse(result.isEmpty()); + } + + private String backup(BackupType 
backupType, List tables) throws IOException { + LOG.info("Creating the backup ..."); + + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + BackupRequest backupRequest = + new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString()) + .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build(); + return backupAdmin.backupTables(backupRequest); + } + + } + + private void restore(String backupId, TableName sourceTableName, TableName targetTableName) + throws IOException { + LOG.info("Restoring data ..."); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + RestoreRequest restoreRequest = new RestoreRequest.Builder().withBackupId(backupId) + .withBackupRootDir(BACKUP_ROOT_DIR.toString()).withOvewrite(true) + .withFromTables(new TableName[] { sourceTableName }) + .withToTables(new TableName[] { targetTableName }).build(); + backupAdmin.restore(restoreRequest); + } + } + + private void validateDataEquals(TableName tableName, String expectedData) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Table table = connection.getTable(tableName)) { + Scan scan = new Scan(); + scan.readAllVersions(); + scan.setRaw(true); + scan.setBatch(100); + + for (Result sourceResult : table.getScanner(scan)) { + List sourceCells = sourceResult.listCells(); + for (Cell cell : sourceCells) { + assertEquals(expectedData, Bytes.toStringBinary(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } + } + } + } + + private BackupInfo verifyBackup(String backupId, BackupType expectedType, + BackupInfo.BackupState expectedState) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId); + + // Verify managed backup in HBase + assertEquals(backupId, backupInfo.getBackupId()); + assertEquals(expectedState, backupInfo.getState()); + assertEquals(expectedType, backupInfo.getType()); + return backupInfo; + } + } + + private static void enableBackup(Configuration conf) { + // Enable backup + conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true); + BackupManager.decorateMasterConfiguration(conf); + BackupManager.decorateRegionServerConfiguration(conf); + } + +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 3ccbaab4de12..1fdcf4bcfd44 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -21,15 +21,18 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.OptionalLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; @@ -78,6 +81,7 @@ private static class HFileRecordReader extends RecordReader private Cell value = null; private long count; private boolean seeked = false; + private OptionalLong bulkloadSeqId; @Override public void initialize(InputSplit split, TaskAttemptContext context) @@ -88,6 +92,7 @@ public void initialize(InputSplit split, TaskAttemptContext context) FileSystem fs = path.getFileSystem(conf); LOG.info("Initialize HFileRecordReader for {}", path); this.in = HFile.createReader(fs, path, conf); + this.bulkloadSeqId = StoreFileInfo.getBulkloadSeqId(path); // The file info must be loaded before the scanner can be used. // This seems like a bug in HBase, but it's easily worked around. @@ -109,6 +114,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { return false; } value = scanner.getCell(); + if (value != null && bulkloadSeqId.isPresent()) { + PrivateCellUtil.setSequenceId(value, bulkloadSeqId.getAsLong()); + } count++; return true; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 5c6ef57fad6d..fcbcd2d9f59f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -162,10 +162,10 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) /** * ExtendedCell and ExtendedCellSerialization are InterfaceAudience.Private. We expose this config - * package-private for internal usage for jobs like WALPlayer which need to use features of - * ExtendedCell. + * for internal usage in jobs like WALPlayer which need to use features of ExtendedCell. */ - static final String EXTENDED_CELL_SERIALIZATION_ENABLED_KEY = + @InterfaceAudience.Private + public static final String EXTENDED_CELL_SERIALIZATION_ENABLED_KEY = "hbase.mapreduce.hfileoutputformat.extendedcell.enabled"; static final boolean EXTENDED_CELL_SERIALIZATION_ENABLED_DEFULT = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 48afdc59f86f..6fccccfc8203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -490,7 +490,7 @@ private Path preCommitStoreFile(final String familyName, final Path buildPath, f String name = buildPath.getName(); if (generateNewName) { - name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_"); + name = generateUniqueName((seqNum < 0) ? 
null : StoreFileInfo.formatBulkloadSeqId(seqNum)); } Path dstPath = new Path(storeDir, name); if (!fs.exists(buildPath)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index ae514f0aef8d..5df02bfb26a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -329,13 +329,8 @@ public byte[] getMetadataValue(byte[] key) { @Override public boolean isBulkLoadResult() { - boolean bulkLoadedHFile = false; - String fileName = this.getPath().getName(); - int startPos = fileName.indexOf("SeqId_"); - if (startPos != -1) { - bulkLoadedHFile = true; - } - return bulkLoadedHFile || (metadataMap != null && metadataMap.containsKey(BULKLOAD_TIME_KEY)); + return StoreFileInfo.hasBulkloadSeqId(this.getPath()) + || (metadataMap != null && metadataMap.containsKey(BULKLOAD_TIME_KEY)); } public boolean isCompactedAway() { @@ -413,19 +408,16 @@ private void open() throws IOException { } if (isBulkLoadResult()) { - // generate the sequenceId from the fileName - // fileName is of the form _SeqId__ - String fileName = this.getPath().getName(); - // Use lastIndexOf() to get the last, most recent bulk load seqId. - int startPos = fileName.lastIndexOf("SeqId_"); - if (startPos != -1) { - this.sequenceid = - Long.parseLong(fileName.substring(startPos + 6, fileName.indexOf('_', startPos + 6))); + // For bulkloads, we have to parse the sequenceid from the path name + OptionalLong sequenceId = StoreFileInfo.getBulkloadSeqId(this.getPath()); + if (sequenceId.isPresent()) { + this.sequenceid = sequenceId.getAsLong(); // Handle reference files as done above. if (fileInfo.isTopReference()) { this.sequenceid += 1; } } + // SKIP_RESET_SEQ_ID only works in bulk loaded file. // In mob compaction, the hfile where the cells contain the path of a new mob file is bulk // loaded to hbase, these cells have the same seqIds with the old ones. We do not want diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 1ebe93deff65..052dd5112319 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -19,6 +19,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.util.OptionalLong; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -423,6 +424,54 @@ public String toString() { + (isReference() ? "->" + getReferredToFile(this.getPath()) + "-" + reference : ""); } + /** + * Cells in a bulkloaded file don't have a sequenceId since they don't go through memstore. When a + * bulkload file is committed, the current memstore ts is stamped onto the file name as the + * sequenceId of the file. At read time, the sequenceId is copied onto all of the cells returned + * so that they can be properly sorted relative to other cells in other files. Further, when + * opening multiple files for scan, the sequence id is used to ensusre that the bulkload file's + * scanner is porperly sorted amongst the other scanners. Non-bulkloaded files get their + * sequenceId from the MAX_MEMSTORE_TS_KEY since those go through the memstore and have true + * sequenceIds. 
+ */ + private static final String SEQ_ID_MARKER = "_SeqId_"; + private static final int SEQ_ID_MARKER_LENGTH = SEQ_ID_MARKER.length(); + + /** + * @see #SEQ_ID_MARKER + * @return True if the file name looks like a bulkloaded file, based on the presence of the SeqId + * marker added to those files. + */ + public static boolean hasBulkloadSeqId(final Path path) { + String fileName = path.getName(); + return fileName.contains(SEQ_ID_MARKER); + } + + /** + * @see #SEQ_ID_MARKER + * @return If the path is a properly named bulkloaded file, returns the sequence id stamped at the + * end of the file name. + */ + public static OptionalLong getBulkloadSeqId(final Path path) { + String fileName = path.getName(); + int startPos = fileName.indexOf(SEQ_ID_MARKER); + if (startPos != -1) { + String strVal = fileName.substring(startPos + SEQ_ID_MARKER_LENGTH, + fileName.indexOf('_', startPos + SEQ_ID_MARKER_LENGTH)); + return OptionalLong.of(Long.parseLong(strVal)); + } + return OptionalLong.empty(); + } + + /** + * @see #SEQ_ID_MARKER + * @return A string value for appending to the end of a bulkloaded file name, containing the + * properly formatted SeqId marker. + */ + public static String formatBulkloadSeqId(long seqId) { + return SEQ_ID_MARKER + seqId + "_"; + } + /** * @param path Path to check. * @return True if the path has format of a HFile. From 78923b75e47d4d2eb5749eaff76b7478171e8224 Mon Sep 17 00:00:00 2001 From: Ruben Van Wanzeele Date: Tue, 26 Mar 2024 13:15:00 +0100 Subject: [PATCH 297/514] HBASE-28412 Select correct target table for incremental backup restore (#5776) Contributed-by: Ruben Van Wanzeele Signed-off-by: Bryan Beaudreault --- .../backup/mapreduce/MapReduceRestoreJob.java | 4 +- .../hadoop/hbase/backup/BackupTestUtil.java | 55 ++++ .../TestBackupRestoreOnEmptyEnvironment.java | 257 ++++++++++++++++++ .../TestBackupRestoreWithModifications.java | 28 +- 4 files changed, 317 insertions(+), 27 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java index 55f6bff04cb5..5d654c0d85b5 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java @@ -74,9 +74,7 @@ public void run(Path[] dirPaths, TableName[] tableNames, Path restoreRootDir, BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf()); Configuration conf = getConf(); conf.set(bulkOutputConfKey, bulkOutputPath.toString()); - String[] playerArgs = { dirs, - fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() }; - + String[] playerArgs = { dirs, newTableNames[i].getNameAsString() }; int result; try { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java new file mode 100644 index 000000000000..3665eeb7a76c --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class BackupTestUtil { + private BackupTestUtil() { + } + + static BackupInfo verifyBackup(Configuration conf, String backupId, BackupType expectedType, + BackupInfo.BackupState expectedState) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(conf); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId); + + // Verify managed backup in HBase + assertEquals(backupId, backupInfo.getBackupId()); + assertEquals(expectedState, backupInfo.getState()); + assertEquals(expectedType, backupInfo.getType()); + return backupInfo; + } + } + + static void enableBackup(Configuration conf) { + // Enable backup + conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true); + BackupManager.decorateMasterConfiguration(conf); + BackupManager.decorateRegionServerConfiguration(conf); + } +} diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java new file mode 100644 index 000000000000..300ca360a4ee --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup; +import static org.apache.hadoop.hbase.backup.BackupType.FULL; +import static org.apache.hadoop.hbase.backup.BackupType.INCREMENTAL; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testing.TestingHBaseCluster; +import org.apache.hadoop.hbase.testing.TestingHBaseClusterOption; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category(MediumTests.class) +@RunWith(Parameterized.class) +public class TestBackupRestoreOnEmptyEnvironment { + + private static final Logger LOG = + LoggerFactory.getLogger(TestBackupRestoreOnEmptyEnvironment.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBackupRestoreOnEmptyEnvironment.class); + + @Parameterized.Parameters(name = "{index}: restoreToOtherTable={0}") + public static Iterable data() { + return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; + } + + @Parameterized.Parameter(0) + public boolean restoreToOtherTable; + private TableName sourceTable; + private TableName targetTable; + + private static TestingHBaseCluster cluster; + private static Path BACKUP_ROOT_DIR; + private static final byte[] COLUMN_FAMILY = Bytes.toBytes("0"); + + @BeforeClass + public static void beforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + enableBackup(conf); + cluster = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().conf(conf).build()); + cluster.start(); + BACKUP_ROOT_DIR = new Path(new Path(conf.get("fs.defaultFS")), new Path("/backupIT")); + } + + @AfterClass + public static void afterClass() throws Exception { + cluster.stop(); + } + + @Before + public void setUp() throws Exception { + sourceTable = TableName.valueOf("table"); + targetTable = TableName.valueOf("another-table"); + createTable(sourceTable); + 
createTable(targetTable); + } + + @After + public void removeTables() throws Exception { + deleteTables(); + } + + @Test + public void testRestoreToCorrectTable() throws Exception { + Instant timestamp = Instant.now().minusSeconds(10); + + // load some data + putLoad(sourceTable, timestamp, "data"); + + String backupId = backup(FULL, Collections.singletonList(sourceTable)); + BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + LOG.info("Deleting the tables before restore ..."); + deleteTables(); + + if (restoreToOtherTable) { + restore(backupId, sourceTable, targetTable); + validateDataEquals(targetTable, "data"); + } else { + restore(backupId, sourceTable, sourceTable); + validateDataEquals(sourceTable, "data"); + } + + } + + @Test + public void testRestoreCorrectTableForIncremental() throws Exception { + Instant timestamp = Instant.now().minusSeconds(10); + + // load some data + putLoad(sourceTable, timestamp, "data"); + + String backupId = backup(FULL, Collections.singletonList(sourceTable)); + verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE); + + // some incremental data + putLoad(sourceTable, timestamp.plusMillis(1), "new_data"); + + String backupId2 = backup(INCREMENTAL, Collections.singletonList(sourceTable)); + verifyBackup(cluster.getConf(), backupId2, INCREMENTAL, COMPLETE); + + LOG.info("Deleting the tables before restore ..."); + deleteTables(); + + if (restoreToOtherTable) { + restore(backupId2, sourceTable, targetTable); + validateDataEquals(targetTable, "new_data"); + } else { + restore(backupId2, sourceTable, sourceTable); + validateDataEquals(sourceTable, "new_data"); + } + + } + + private void createTable(TableName tableName) throws IOException { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY)); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Admin admin = connection.getAdmin()) { + admin.createTable(builder.build()); + } + } + + private void deleteTables() throws IOException { + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Admin admin = connection.getAdmin()) { + for (TableName table : Arrays.asList(sourceTable, targetTable)) { + if (admin.tableExists(table)) { + admin.disableTable(table); + admin.deleteTable(table); + } + } + } + } + + private void putLoad(TableName tableName, Instant timestamp, String data) throws IOException { + LOG.info("Writing new data to HBase using normal Puts: {}", data); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf())) { + Table table = connection.getTable(sourceTable); + List puts = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Put put = new Put(Bytes.toBytes(i), timestamp.toEpochMilli()); + put.addColumn(COLUMN_FAMILY, Bytes.toBytes("data"), Bytes.toBytes(data)); + puts.add(put); + + if (i % 100 == 0) { + table.put(puts); + puts.clear(); + } + } + if (!puts.isEmpty()) { + table.put(puts); + } + connection.getAdmin().flush(tableName); + } + } + + private String backup(BackupType backupType, List tables) throws IOException { + LOG.info("Creating the backup ..."); + + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + BackupRequest backupRequest = + new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString()) + 
.withTableList(new ArrayList<>(tables)).withBackupType(backupType).build(); + return backupAdmin.backupTables(backupRequest); + } + + } + + private void restore(String backupId, TableName sourceTableName, TableName targetTableName) + throws IOException { + LOG.info("Restoring data ..."); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + RestoreRequest restoreRequest = new RestoreRequest.Builder().withBackupId(backupId) + .withBackupRootDir(BACKUP_ROOT_DIR.toString()).withOvewrite(true) + .withFromTables(new TableName[] { sourceTableName }) + .withToTables(new TableName[] { targetTableName }).build(); + backupAdmin.restore(restoreRequest); + } + } + + private void validateDataEquals(TableName tableName, String expectedData) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Table table = connection.getTable(tableName)) { + Scan scan = new Scan(); + scan.setRaw(true); + scan.setBatch(100); + + for (Result sourceResult : table.getScanner(scan)) { + List sourceCells = sourceResult.listCells(); + for (Cell cell : sourceCells) { + assertEquals(expectedData, Bytes.toStringBinary(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } + } + } + } +} diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java index d01df687edac..62ba5006ac7b 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.backup; import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup; import static org.apache.hadoop.hbase.backup.BackupType.FULL; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; -import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -126,7 +127,7 @@ public void testModificationsOnTable() throws Exception { load(sourceTable, timestamp, "data"); String backupId = backup(FULL, allTables); - BackupInfo backupInfo = verifyBackup(backupId, FULL, COMPLETE); + BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE); assertTrue(backupInfo.getTables().contains(sourceTable)); restore(backupId, sourceTable, targetTable); @@ -137,7 +138,7 @@ public void testModificationsOnTable() throws Exception { load(sourceTable, timestamp, "changed_data"); backupId = backup(FULL, allTables); - backupInfo = verifyBackup(backupId, FULL, COMPLETE); + backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE); assertTrue(backupInfo.getTables().contains(sourceTable)); restore(backupId, sourceTable, targetTable); @@ -252,25 +253,4 @@ private void validateDataEquals(TableName tableName, String expectedData) throws } } - private BackupInfo 
verifyBackup(String backupId, BackupType expectedType, - BackupInfo.BackupState expectedState) throws IOException { - try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); - BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { - BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId); - - // Verify managed backup in HBase - assertEquals(backupId, backupInfo.getBackupId()); - assertEquals(expectedState, backupInfo.getState()); - assertEquals(expectedType, backupInfo.getType()); - return backupInfo; - } - } - - private static void enableBackup(Configuration conf) { - // Enable backup - conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true); - BackupManager.decorateMasterConfiguration(conf); - BackupManager.decorateRegionServerConfiguration(conf); - } - } From 298c550c804305f2c57029a563039eefcbb4af40 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 27 Mar 2024 10:04:14 +0000 Subject: [PATCH 298/514] HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache recovery from file (#5769) --- .../hbase/io/hfile/bucket/BucketCache.java | 16 ++++----- .../io/hfile/bucket/BucketProtoUtils.java | 14 +++++--- .../TestRecoveryPersistentBucketCache.java | 33 +++++++++++++++++++ 3 files changed, 50 insertions(+), 13 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 57f71b31894e..912a3ab524fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -222,13 +222,8 @@ public class BucketCache implements BlockCache, HeapSize { */ transient final IdReadWriteLock offsetLock; - final NavigableSet blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> { - int nameComparison = a.getHfileName().compareTo(b.getHfileName()); - if (nameComparison != 0) { - return nameComparison; - } - return Long.compare(a.getOffset(), b.getOffset()); - }); + NavigableSet blocksByHFile = new ConcurrentSkipListSet<>( + Comparator.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset)); /** Statistics thread schedule pool (for heavy debugging, could remove) */ private transient final ScheduledExecutorService scheduleThreadPool = @@ -1471,8 +1466,11 @@ private void verifyCapacityAndClasses(long capacitySize, String ioclass, String } private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException { - backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), - this::createRecycler); + Pair, NavigableSet> pair = + BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), + this::createRecycler); + backingMap = pair.getFirst(); + blocksByHFile = pair.getSecond(); fullyCachedFiles.clear(); fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap())); if (proto.hasChecksum()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index 7cc5050506e4..4b42414fb9c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -18,9 +18,12 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import 
java.io.IOException; +import java.util.Comparator; import java.util.HashMap; import java.util.Map; +import java.util.NavigableSet; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListSet; import java.util.function.Function; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; @@ -121,10 +124,12 @@ private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { } } - static ConcurrentHashMap fromPB(Map deserializers, - BucketCacheProtos.BackingMap backingMap, Function createRecycler) - throws IOException { + static Pair, NavigableSet> fromPB( + Map deserializers, BucketCacheProtos.BackingMap backingMap, + Function createRecycler) throws IOException { ConcurrentHashMap result = new ConcurrentHashMap<>(); + NavigableSet resultSet = new ConcurrentSkipListSet<>(Comparator + .comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset)); for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) { BucketCacheProtos.BlockCacheKey protoKey = entry.getKey(); BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(), @@ -153,8 +158,9 @@ static ConcurrentHashMap fromPB(Map throw new IOException("Unknown deserializer class found: " + deserializerClass); } result.put(key, value); + resultSet.add(key); } - return result; + return new Pair<>(result, resultSet); } private static BlockType fromPb(BucketCacheProtos.BlockType blockType) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java index ad91d01f8cfd..63ff334826d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java @@ -108,6 +108,39 @@ public void testBucketCacheRecovery() throws Exception { TEST_UTIL.cleanupTestDir(); } + @Test + public void testBucketCacheEvictByHFileAfterRecovery() throws Exception { + HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + Path testDir = TEST_UTIL.getDataTestDir(); + TEST_UTIL.getTestFileSystem().mkdirs(testDir); + Configuration conf = HBaseConfiguration.create(); + // Disables the persister thread by setting its interval to MAX_VALUE + conf.setLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, Long.MAX_VALUE); + int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, conf); + + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 4); + + // Add four blocks + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[0].getBlockName(), blocks[0].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[1].getBlockName(), blocks[1].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[2].getBlockName(), blocks[2].getBlock()); + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[3].getBlockName(), blocks[3].getBlock()); + // saves the current state of the cache + bucketCache.persistToFile(); + + BucketCache newBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + 
"/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, conf); + Thread.sleep(100); + assertEquals(4, newBucketCache.backingMap.size()); + newBucketCache.evictBlocksByHfileName(blocks[0].getBlockName().getHfileName()); + assertEquals(3, newBucketCache.backingMap.size()); + TEST_UTIL.cleanupTestDir(); + } + private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey) throws InterruptedException { while (!cache.backingMap.containsKey(cacheKey) || cache.ramCache.containsKey(cacheKey)) { From 5a0c4de66b16281b25fd14bfbe2ee60d0657b8a3 Mon Sep 17 00:00:00 2001 From: ConfX <114765570+teamconfx@users.noreply.github.com> Date: Thu, 28 Mar 2024 23:01:23 +0800 Subject: [PATCH 299/514] HBASE-27990 BucketCache causes ArithmeticException due to improper blockSize value checking (#5389) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 912a3ab524fe..cd65050baa76 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -283,6 +283,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, Configuration conf) throws IOException { + Preconditions.checkArgument(blockSize > 0, "BucketCache capacity is set to " + blockSize + ", can not be less than 0"); boolean useStrongRef = conf.getBoolean(STRONG_REF_KEY, STRONG_REF_DEFAULT); if (useStrongRef) { this.offsetLock = new IdReadWriteLockStrongRef<>(); From bfc537562c86c5b41a52a1eff50bc6c85e89caf9 Mon Sep 17 00:00:00 2001 From: ConfX <114765570+teamconfx@users.noreply.github.com> Date: Thu, 28 Mar 2024 23:01:40 +0800 Subject: [PATCH 300/514] HBASE-27989 ByteBuffAllocator causes ArithmeticException due to improper poolBufSize value checking (#5388) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/io/ByteBuffAllocator.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java index 60b89223c169..737d93207cbb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java @@ -171,11 +171,19 @@ public static ByteBuffAllocator create(Configuration conf, boolean reservoirEnab // that by the time a handler originated response is actually done writing to socket and so // released the BBs it used, the handler might have processed one more read req. On an avg 2x // we consider and consider that also for the max buffers to pool + if (poolBufSize <= 0) { + throw new IllegalArgumentException(BUFFER_SIZE_KEY + " must be positive. 
Please disable " + + "the reservoir rather than setting the size of the buffer to zero or negative."); + } int bufsForTwoMB = (2 * 1024 * 1024) / poolBufSize; int maxBuffCount = conf.getInt(MAX_BUFFER_COUNT_KEY, conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * bufsForTwoMB * 2); int minSizeForReservoirUse = conf.getInt(MIN_ALLOCATE_SIZE_KEY, poolBufSize / 6); + if (minSizeForReservoirUse <= 0) { + LOG.warn("The minimal size for reservoir use is less or equal to zero, all allocations " + + "will be from the pool. Set a higher " + MIN_ALLOCATE_SIZE_KEY + " to avoid this."); + } Class clazz = conf.getClass(BYTEBUFF_ALLOCATOR_CLASS, ByteBuffAllocator.class); return (ByteBuffAllocator) ReflectionUtils.newInstance(clazz, true, maxBuffCount, poolBufSize, minSizeForReservoirUse); From f412bdbb9ad9810e9bf3db0b76f7f1dffff3d15d Mon Sep 17 00:00:00 2001 From: ConfX <114765570+teamconfx@users.noreply.github.com> Date: Thu, 28 Mar 2024 23:02:07 +0800 Subject: [PATCH 301/514] HBASE-28452 Missing null check of rpcServer.scheduler.executor causes NPE with invalid value of hbase.client.default.rpc.codec (#5778) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index b51154fc24eb..842366d625f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -68,7 +68,9 @@ public void start() { @Override public void stop() { - this.executor.shutdown(); + if (this.executor != null) { + this.executor.shutdown(); + } } private static class FifoCallRunner implements Runnable { From 8be9e51fdf474a94b12aab63332f8bb0344854f6 Mon Sep 17 00:00:00 2001 From: ConfX <114765570+teamconfx@users.noreply.github.com> Date: Thu, 28 Mar 2024 23:04:02 +0800 Subject: [PATCH 302/514] HBASE-27993 AbstractFSWAL causes ArithmeticException due to improper logRollSize value checking (#5390) Signed-off-by: Duo Zhang Signed-off-by: Wei-Chiu Chuang --- .../apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index a94d827e8e2d..7a057ca7c7b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -476,6 +476,9 @@ protected long getFileNumFromFileName(Path fileName) { } private int calculateMaxLogFiles(Configuration conf, long logRollSize) { + checkArgument(logRollSize > 0, + "The log roll size cannot be zero or negative when calculating max log files, " + + "current value is " + logRollSize); Pair globalMemstoreSize = MemorySizeUtil.getGlobalMemStoreSize(conf); return (int) ((globalMemstoreSize.getFirst() * 2) / logRollSize); } From 9adca10e9c106df5119cc067fd610fb19cd1d5ec Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 28 Mar 2024 23:49:01 +0800 Subject: [PATCH 303/514] HBASE-27990 Addendum fix spotless error --- .../org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index cd65050baa76..855f183b98f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -283,7 +283,8 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration, Configuration conf) throws IOException { - Preconditions.checkArgument(blockSize > 0, "BucketCache capacity is set to " + blockSize + ", can not be less than 0"); + Preconditions.checkArgument(blockSize > 0, + "BucketCache capacity is set to " + blockSize + ", can not be less than 0"); boolean useStrongRef = conf.getBoolean(STRONG_REF_KEY, STRONG_REF_DEFAULT); if (useStrongRef) { this.offsetLock = new IdReadWriteLockStrongRef<>(); From 2941d6ee6542b8b1761a9e88a0a3f048d191eaca Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 1 Apr 2024 16:22:30 +0800 Subject: [PATCH 304/514] HBASE-21533 Add a section in our ref guide for the folding(removal) of hbase:namespace table (#5787) Signed-off-by: Bryan Beaudreault --- src/main/asciidoc/_chapters/datamodel.adoc | 17 +++++++++++++++++ src/main/asciidoc/_chapters/upgrading.adoc | 3 +++ 2 files changed, 20 insertions(+) diff --git a/src/main/asciidoc/_chapters/datamodel.adoc b/src/main/asciidoc/_chapters/datamodel.adoc index c8164e4696a8..2e6070dbc90b 100644 --- a/src/main/asciidoc/_chapters/datamodel.adoc +++ b/src/main/asciidoc/_chapters/datamodel.adoc @@ -238,6 +238,23 @@ create 'bar', 'fam' ---- ==== +[[namespace_table]] +=== About hbase:namespace table +We used to have a system table called `hbase:namespace` for storing the namespace information. + +It introduced some painful bugs in the past, especially that it may hang the master startup thus +hang the whole cluster. This is because meta table also has a namespace, so it depends on namespace +table. But namespace table also depends on meta table as meta table stores the location of all +regions. This is a cyclic dependency so sometimes namespace and meta table will wait for each other +to online and hang the master start up. + +It is not easy to fix so in 3.0.0, we decided to completely remove the `hbase:namespace` table and +fold its content into the `ns` family in `hbase:meta` table. When upgrading from 2.x to 3.x, the +migration will be done automatically and the `hbase:namespace` table will be disabled after the +migration is done. You are free to leave it there for sometime and finally drop it. + +For more tails, please see https://issues.apache.org/jira/browse/HBASE-21154[HBASE-21154]. + == Table Tables are declared up front at schema definition time. diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc index 83030cc5038a..055acd968078 100644 --- a/src/main/asciidoc/_chapters/upgrading.adoc +++ b/src/main/asciidoc/_chapters/upgrading.adoc @@ -327,6 +327,9 @@ Quitting... The RegionServer Grouping feature has been reimplemented. See section <> in <> for more details. +The `hbase:namespace` table has been removed and fold into `hbase:meta`. See section +<> in <> for more details. + [[upgrade2.4]] There is no special consideration upgrading to hbase-2.4.x from 2.3.x. 
And for earlier versions, just follow the <> guide. In general, 2.2.x should be rolling upgradeable, for 2.1.x From 6f236c0d18c98c68f4ac43049963e1ff3d4c81f4 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 2 Apr 2024 11:58:53 +0100 Subject: [PATCH 305/514] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) Signed-off-by: Duo Zhang --- .../hbase/io/hfile/bucket/BucketCache.java | 24 +++++++++++------- .../TestBlockEvictionOnRegionMovement.java | 9 ++++--- .../bucket/TestBucketCachePersister.java | 25 ++++++++++--------- 3 files changed, 33 insertions(+), 25 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 855f183b98f9..9541939db947 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -2073,25 +2073,29 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d // so we need to count all blocks for this file in the backing map under // a read lock for the block offset final List locks = new ArrayList<>(); - LOG.debug("Notifying caching completed for file {}, with total blocks {}", fileName, - dataBlockCount); + LOG.debug("Notifying caching completed for file {}, with total blocks {}, and data blocks {}", + fileName, totalBlockCount, dataBlockCount); try { final MutableInt count = new MutableInt(); LOG.debug("iterating over {} entries in the backing map", backingMap.size()); backingMap.entrySet().stream().forEach(entry -> { - if (entry.getKey().getHfileName().equals(fileName.getName())) { + if ( + entry.getKey().getHfileName().equals(fileName.getName()) + && entry.getKey().getBlockType().equals(BlockType.DATA) + ) { LOG.debug("found block for file {} in the backing map. Acquiring read lock for offset {}", fileName, entry.getKey().getOffset()); ReentrantReadWriteLock lock = offsetLock.getLock(entry.getKey().getOffset()); lock.readLock().lock(); locks.add(lock); + // rechecks the given key is still there (no eviction happened before the lock acquired) if (backingMap.containsKey(entry.getKey())) { count.increment(); } } }); - // We may either place only data blocks on the BucketCache or all type of blocks - if (dataBlockCount == count.getValue() || totalBlockCount == count.getValue()) { + // BucketCache would only have data blocks + if (dataBlockCount == count.getValue()) { LOG.debug("File {} has now been fully cached.", fileName); fileCacheCompleted(fileName, size); } else { @@ -2100,15 +2104,17 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d + "Total blocks for file: {}. Checking for blocks pending cache in cache writer queue.", fileName, count.getValue(), dataBlockCount); if (ramCache.hasBlocksForFile(fileName.getName())) { + for (ReentrantReadWriteLock lock : locks) { + lock.readLock().unlock(); + } LOG.debug("There are still blocks pending caching for file {}. Will sleep 100ms " + "and try the verification again.", fileName); Thread.sleep(100); notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); } else { - LOG.info( - "We found only {} blocks cached from a total of {} for file {}, " - + "but no blocks pending caching. 
Maybe cache is full?", - count, dataBlockCount, fileName); + LOG.info("We found only {} blocks cached from a total of {} for file {}, " + + "but no blocks pending caching. Maybe cache is full or evictions " + + "happened concurrently to cache prefetch.", count, totalBlockCount, fileName); } } } catch (InterruptedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java index eb3e3cc61f4b..7303cf53a55a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.junit.After; import org.junit.Before; @@ -89,7 +90,7 @@ public void setup() throws Exception { @Test public void testBlockEvictionOnRegionMove() throws Exception { // Write to table and flush - TableName tableRegionMove = writeDataToTable(); + TableName tableRegionMove = writeDataToTable("testBlockEvictionOnRegionMove"); HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableRegionMove).size() == 1 @@ -115,7 +116,7 @@ public void testBlockEvictionOnRegionMove() throws Exception { @Test public void testBlockEvictionOnGracefulStop() throws Exception { // Write to table and flush - TableName tableRegionClose = writeDataToTable(); + TableName tableRegionClose = writeDataToTable("testBlockEvictionOnGracefulStop"); HRegionServer regionServingRS = cluster.getRegionServer(1).getRegions(tableRegionClose).size() == 1 @@ -138,8 +139,8 @@ public void testBlockEvictionOnGracefulStop() throws Exception { assertNotEquals(0, regionServingRS.getBlockCache().get().getBlockCaches()[1].getBlockCount()); } - public TableName writeDataToTable() throws IOException, InterruptedException { - TableName tableName = TableName.valueOf("table1"); + public TableName writeDataToTable(String testName) throws IOException, InterruptedException { + TableName tableName = TableName.valueOf(testName + EnvironmentEdgeManager.currentTime()); byte[] row0 = Bytes.toBytes("row1"); byte[] row1 = Bytes.toBytes("row2"); byte[] family = Bytes.toBytes("family"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index a39df7e14715..d60d2c53ef6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -164,20 +164,21 @@ public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception { // Load Blocks in cache Path storeFile = writeStoreFile("TestPrefetch3", conf, cacheConf, fs); HFile.createReader(fs, storeFile, cacheConf, true, conf); - while (bucketCache.backingMap.size() == 0) { + boolean evicted = false; + while (!PrefetchExecutor.isCompleted(storeFile)) { + if (bucketCache.backingMap.size() > 0 && !evicted) { + Iterator> it = + bucketCache.backingMap.entrySet().iterator(); + // Evict a data block from 
cache + Map.Entry entry = it.next(); + while (it.hasNext() && !evicted) { + if (entry.getKey().getBlockType().equals(BlockType.DATA)) { + evicted = bucketCache.evictBlock(it.next().getKey()); + } + } + } Thread.sleep(10); } - Iterator> it = - bucketCache.backingMap.entrySet().iterator(); - // Evict Blocks from cache - bucketCache.evictBlock(it.next().getKey()); - bucketCache.evictBlock(it.next().getKey()); - int retries = 0; - while (!PrefetchExecutor.isCompleted(storeFile) && retries < 5) { - Thread.sleep(500); - retries++; - } - assertTrue(retries < 5); assertFalse(bucketCache.fullyCachedFiles.containsKey(storeFile.getName())); cleanupBucketCache(bucketCache); } From 90639d75a588318571cbcc63fddf66ec2372c105 Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:38:48 +0200 Subject: [PATCH 306/514] HBASE-28460 Full backup restore failed on empty HFiles (#5782) Signed-off-by: Bryan Beaudreault --- .../backup/mapreduce/MapReduceRestoreJob.java | 5 +-- .../hadoop/hbase/backup/util/RestoreTool.java | 14 +++++-- .../hadoop/hbase/backup/TestFullRestore.java | 38 +++++++++++++++++++ 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java index 5d654c0d85b5..7a2fce4c418a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java @@ -87,10 +87,7 @@ public void run(Path[] dirPaths, TableName[] tableNames, Path restoreRootDir, LOG.debug("Restoring HFiles from directory " + bulkOutputPath); } - if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) { - throw new IOException("Can not restore from backup directory " + dirs - + " (check Hadoop and HBase logs). Bulk loader returns null"); - } + loader.bulkLoad(newTableNames[i], bulkOutputPath); } else { throw new IOException("Can not restore from backup directory " + dirs + " (check Hadoop/MR and HBase logs). 
Player return code =" + result); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java index 8ca80d1301f6..ff4e2672f7a2 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java @@ -61,7 +61,7 @@ */ @InterfaceAudience.Private public class RestoreTool { - public static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class); + public static final Logger LOG = LoggerFactory.getLogger(RestoreTool.class); private final static long TABLE_AVAILABILITY_WAIT_TIME = 180000; private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR }; @@ -437,6 +437,10 @@ byte[][] generateBoundaryKeys(ArrayList regionDirList) throws IOException HFile.Reader reader = HFile.createReader(fs, hfile, conf); final byte[] first, last; try { + if (reader.getEntries() == 0) { + LOG.debug("Skipping hfile with 0 entries: " + hfile); + continue; + } first = reader.getFirstRowKey().get(); last = reader.getLastRowKey().get(); LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" @@ -491,8 +495,12 @@ private void checkAndCreateTable(Connection conn, TableName targetTableName, admin.createTable(htd); } else { keys = generateBoundaryKeys(regionDirList); - // create table using table descriptor and region boundaries - admin.createTable(htd, keys); + if (keys.length > 0) { + // create table using table descriptor and region boundaries + admin.createTable(htd, keys); + } else { + admin.createTable(htd); + } } } catch (NamespaceNotFoundException e) { LOG.warn("There was no namespace and the same will be created"); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index 385a6b3c5193..d16d7af75014 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.ClassRule; @@ -71,6 +72,43 @@ public void testFullRestoreSingle() throws Exception { hba.close(); } + @Test + public void testFullRestoreSingleWithRegion() throws Exception { + LOG.info("test full restore on a single table empty table that has a region"); + + // This test creates its own table so other tests are not affected (we adjust it in this test) + TableName tableName = TableName.valueOf("table-full-restore-single-region"); + TEST_UTIL.createTable(tableName, famName); + + Admin admin = TEST_UTIL.getAdmin(); + + // Add & remove data to ensure a region is active, but functionally empty + Table table = TEST_UTIL.getConnection().getTable(tableName); + loadTable(table); + admin.flush(tableName); + TEST_UTIL.deleteTableData(tableName); + admin.flush(tableName); + + TEST_UTIL.compact(tableName, true); + + List tables = Lists.newArrayList(tableName); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TEST_UTIL.deleteTable(tableName); + + TableName[] tableset = new TableName[] { 
tableName }; + TableName[] tablemap = new TableName[] { tableName }; + BackupAdmin client = getBackupAdmin(); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, + tablemap, false)); + assertTrue(admin.tableExists(tableName)); + TEST_UTIL.deleteTable(tableName); + admin.close(); + } + @Test public void testFullRestoreSingleCommand() throws Exception { LOG.info("test full restore on a single table empty table: command-line"); From 28c1e3b2a6b81ca26cc33553bb1a204f8756b97f Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 3 Apr 2024 08:39:27 +0200 Subject: [PATCH 307/514] Add stoty to the developer list (#5790) --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 9e1a4bdbe214..6cecebd50af9 100644 --- a/pom.xml +++ b/pom.xml @@ -502,6 +502,12 @@ stack@apache.org -8 + + stoty + Istvan Toth + stoty@apache.org + +1 + syuanjiang Stephen Yuan Jiang From 6101bad5a3a7420d7be5ccff1ccac4ae9cd5252a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 4 Apr 2024 21:13:10 -0800 Subject: [PATCH 308/514] HBASE-28366 Mis-order of SCP and regionServerReport results into region inconsistencies (#5774) Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../hadoop/hbase/master/ServerManager.java | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 3217b6dfcc92..55cfc28bb53a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -324,8 +324,24 @@ public void regionServerReport(ServerName sn, ServerMetrics sl) throws YouAreDea // the ServerName to use. Here we presume a master has already done // that so we'll press on with whatever it gave us for ServerName. if (!checkAndRecordNewServer(sn, sl)) { - LOG.info("RegionServerReport ignored, could not record the server: " + sn); - return; // Not recorded, so no need to move on + // Master already registered server with same (host + port) and higher startcode. + // This can happen if regionserver report comes late from old server + // (possible race condition), by that time master has already processed SCP for that + // server and started accepting regionserver report from new server i.e. server with + // same (host + port) and higher startcode. + // The exception thrown here is not meant to tell the region server it is dead because if + // there is a new server on the same host port, the old server should have already been + // dead in ideal situation. + // The exception thrown here is to skip the later steps of the whole regionServerReport + // request processing. Usually, after recording it in ServerManager, we will call the + // related methods in AssignmentManager to record region states. If the region server + // is already dead, we should not do these steps anymore, so here we throw an exception + // to let the upper layer know that they should not continue processing anymore. 
+ final String errorMsg = "RegionServerReport received from " + sn + + ", but another server with the same name and higher startcode is already registered," + + " ignoring"; + LOG.warn(errorMsg); + throw new YouAreDeadException(errorMsg); } } updateLastFlushedSequenceIds(sn, sl); From aea7e7c85cdb8628fb03ead0f94d8e07ad49f067 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Fri, 5 Apr 2024 10:56:06 +0100 Subject: [PATCH 309/514] [ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) (#5791) Signed-off-by: Peter Somogyi --- .../hbase/io/hfile/bucket/BucketCache.java | 46 +++++++++++++------ .../hfile/bucket/TestPrefetchPersistence.java | 35 ++------------ 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 9541939db947..71bfc757e51e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -747,7 +747,8 @@ private boolean doEvictBlock(BlockCacheKey cacheKey, BucketEntry bucketEntry, } else { return bucketEntryToUse.withWriteLock(offsetLock, () -> { if (backingMap.remove(cacheKey, bucketEntryToUse)) { - LOG.debug("removed key {} from back map in the evict process", cacheKey); + LOG.debug("removed key {} from back map with offset lock {} in the evict process", + cacheKey, bucketEntryToUse.offset()); blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, evictedByEvictionProcess); return true; } @@ -1658,19 +1659,21 @@ protected String getAlgorithm() { @Override public int evictBlocksByHfileName(String hfileName) { fileNotFullyCached(hfileName); - Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), - true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); - + Set keySet = getAllCacheKeysForFile(hfileName); int numEvicted = 0; for (BlockCacheKey key : keySet) { if (evictBlock(key)) { ++numEvicted; } } - return numEvicted; } + private Set getAllCacheKeysForFile(String hfileName) { + return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, + new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + } + /** * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate @@ -2083,25 +2086,32 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d entry.getKey().getHfileName().equals(fileName.getName()) && entry.getKey().getBlockType().equals(BlockType.DATA) ) { - LOG.debug("found block for file {} in the backing map. Acquiring read lock for offset {}", - fileName, entry.getKey().getOffset()); - ReentrantReadWriteLock lock = offsetLock.getLock(entry.getKey().getOffset()); + long offsetToLock = entry.getValue().offset(); + LOG.debug("found block {} in the backing map. 
Acquiring read lock for offset {}", + entry.getKey(), offsetToLock); + ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock); lock.readLock().lock(); locks.add(lock); // rechecks the given key is still there (no eviction happened before the lock acquired) if (backingMap.containsKey(entry.getKey())) { count.increment(); + } else { + lock.readLock().unlock(); + locks.remove(lock); + LOG.debug("found block {}, but when locked and tried to count, it was gone."); } } }); + int metaCount = totalBlockCount - dataBlockCount; // BucketCache would only have data blocks if (dataBlockCount == count.getValue()) { LOG.debug("File {} has now been fully cached.", fileName); fileCacheCompleted(fileName, size); } else { LOG.debug( - "Prefetch executor completed for {}, but only {} blocks were cached. " - + "Total blocks for file: {}. Checking for blocks pending cache in cache writer queue.", + "Prefetch executor completed for {}, but only {} data blocks were cached. " + + "Total data blocks for file: {}. " + + "Checking for blocks pending cache in cache writer queue.", fileName, count.getValue(), dataBlockCount); if (ramCache.hasBlocksForFile(fileName.getName())) { for (ReentrantReadWriteLock lock : locks) { @@ -2111,11 +2121,17 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d + "and try the verification again.", fileName); Thread.sleep(100); notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); - } else { - LOG.info("We found only {} blocks cached from a total of {} for file {}, " - + "but no blocks pending caching. Maybe cache is full or evictions " - + "happened concurrently to cache prefetch.", count, totalBlockCount, fileName); - } + } else + if ((getAllCacheKeysForFile(fileName.getName()).size() - metaCount) == dataBlockCount) { + LOG.debug("We counted {} data blocks, expected was {}, there was no more pending in " + + "the cache write queue but we now found that total cached blocks for file {} " + + "is equal to data block count.", count, dataBlockCount, fileName.getName()); + fileCacheCompleted(fileName, size); + } else { + LOG.info("We found only {} data blocks cached from a total of {} for file {}, " + + "but no blocks pending caching. 
Maybe cache is full or evictions " + + "happened concurrently to cache prefetch.", count, dataBlockCount, fileName); + } } } catch (InterruptedException e) { throw new RuntimeException(e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java index 035cdc3f887e..4da2d5af9232 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchPersistence.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -34,11 +33,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; -import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil; @@ -121,8 +117,8 @@ public void testPrefetchPersistence() throws Exception { // Load Cache Path storeFile = writeStoreFile("TestPrefetch0"); Path storeFile2 = writeStoreFile("TestPrefetch1"); - readStoreFile(storeFile, 0); - readStoreFile(storeFile2, 0); + readStoreFile(storeFile); + readStoreFile(storeFile2); usedSize = bucketCache.getAllocator().getUsedSize(); assertNotEquals(0, usedSize); @@ -133,39 +129,18 @@ public void testPrefetchPersistence() throws Exception { testDir + "/bucket.persistence", 60 * 1000, conf); cacheConf = new CacheConfig(conf, bucketCache); assertTrue(usedSize != 0); - readStoreFile(storeFile, 0); - readStoreFile(storeFile2, 0); - // Test Close Store File - closeStoreFile(storeFile2); + assertTrue(bucketCache.fullyCachedFiles.containsKey(storeFile.getName())); + assertTrue(bucketCache.fullyCachedFiles.containsKey(storeFile2.getName())); TEST_UTIL.cleanupTestDir(); } - public void closeStoreFile(Path path) throws Exception { - HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf); - assertTrue(bucketCache.fullyCachedFiles.containsKey(path.getName())); - reader.close(true); - assertFalse(bucketCache.fullyCachedFiles.containsKey(path.getName())); - } - - public void readStoreFile(Path storeFilePath, long offset) throws Exception { + public void readStoreFile(Path storeFilePath) throws Exception { // Open the file HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); - while (!reader.prefetchComplete()) { // Sleep for a bit Thread.sleep(1000); } - HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); - BucketEntry be = bucketCache.backingMap.get(blockCacheKey); - boolean isCached = bucketCache.getBlock(blockCacheKey, true, false, true) != null; - - if ( - block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX - || block.getBlockType() == BlockType.INTERMEDIATE_INDEX - ) { - assertTrue(isCached); - } } public Path 
writeStoreFile(String fname) throws IOException { From 558b151c5b25863e1f05dc8bb5050ea398108d5e Mon Sep 17 00:00:00 2001 From: Thomas Sarens Date: Sat, 6 Apr 2024 19:22:17 +0200 Subject: [PATCH 310/514] HBASE-28483 backup merge fails on bulkloaded hfiles (#5795) Signed-off-by: Bryan Beaudreault --- ...estIncrementalBackupMergeWithBulkLoad.java | 250 ++++++++++++++++++ .../hbase/mapreduce/HFileInputFormat.java | 25 +- hbase-replication/pom.xml | 5 - 3 files changed, 266 insertions(+), 14 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java new file mode 100644 index 000000000000..058413fa1d15 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup; +import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup; +import static org.apache.hadoop.hbase.backup.BackupType.FULL; +import static org.apache.hadoop.hbase.backup.BackupType.INCREMENTAL; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testing.TestingHBaseCluster; +import org.apache.hadoop.hbase.testing.TestingHBaseClusterOption; +import org.apache.hadoop.hbase.tool.BulkLoadHFiles; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category(MediumTests.class) +@RunWith(Parameterized.class) +public class TestIncrementalBackupMergeWithBulkLoad { + + private static final Logger LOG = + LoggerFactory.getLogger(TestIncrementalBackupMergeWithBulkLoad.class); + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestIncrementalBackupMergeWithBulkLoad.class); + + @Parameterized.Parameters(name = "{index}: useBulkLoad={0}") + public static Iterable data() { + return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; + } + + @Parameterized.Parameter(0) + public boolean useBulkLoad; + + private TableName sourceTable; + private TableName targetTable; + + private List allTables; + private static TestingHBaseCluster cluster; + private static final Path BACKUP_ROOT_DIR = new Path("backupIT"); + private static final byte[] COLUMN_FAMILY = Bytes.toBytes("0"); + + @BeforeClass + public static void beforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + enableBackup(conf); + cluster = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().conf(conf).build()); + cluster.start(); + } 
+ + @AfterClass + public static void afterClass() throws Exception { + cluster.stop(); + } + + @Before + public void setUp() throws Exception { + sourceTable = TableName.valueOf("table-" + useBulkLoad); + targetTable = TableName.valueOf("another-table-" + useBulkLoad); + allTables = Arrays.asList(sourceTable, targetTable); + createTable(sourceTable); + createTable(targetTable); + } + + @Test + public void testMergeContainingBulkloadedHfiles() throws Exception { + Instant timestamp = Instant.now(); + + String backupId = backup(FULL, allTables); + BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + // load some data + load(sourceTable, timestamp, "data"); + + String backupId1 = backup(INCREMENTAL, allTables); + backupInfo = verifyBackup(cluster.getConf(), backupId1, INCREMENTAL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + String backupId2 = backup(INCREMENTAL, allTables); + backupInfo = verifyBackup(cluster.getConf(), backupId2, INCREMENTAL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + + merge(new String[] { backupId1, backupId2 }); + backupInfo = verifyBackup(cluster.getConf(), backupId2, INCREMENTAL, COMPLETE); + assertTrue(backupInfo.getTables().contains(sourceTable)); + validateDataEquals(sourceTable, "data"); + } + + private void createTable(TableName tableName) throws IOException { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY)); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Admin admin = connection.getAdmin()) { + admin.createTable(builder.build()); + } + } + + private void load(TableName tableName, Instant timestamp, String data) throws IOException { + if (useBulkLoad) { + hFileBulkLoad(tableName, timestamp, data); + } else { + putLoad(tableName, timestamp, data); + } + } + + private void putLoad(TableName tableName, Instant timestamp, String data) throws IOException { + LOG.info("Writing new data to HBase using normal Puts: {}", data); + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf())) { + Table table = connection.getTable(sourceTable); + List puts = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Put put = new Put(Bytes.toBytes(i), timestamp.toEpochMilli()); + put.addColumn(COLUMN_FAMILY, Bytes.toBytes("data"), Bytes.toBytes(data)); + puts.add(put); + + if (i % 100 == 0) { + table.put(puts); + puts.clear(); + } + } + if (!puts.isEmpty()) { + table.put(puts); + } + connection.getAdmin().flush(tableName); + } + } + + private void hFileBulkLoad(TableName tableName, Instant timestamp, String data) + throws IOException { + FileSystem fs = FileSystem.get(cluster.getConf()); + LOG.info("Writing new data to HBase using BulkLoad: {}", data); + // HFiles require this strict directory structure to allow to load them + Path hFileRootPath = new Path("/tmp/hfiles_" + UUID.randomUUID()); + fs.mkdirs(hFileRootPath); + Path hFileFamilyPath = new Path(hFileRootPath, Bytes.toString(COLUMN_FAMILY)); + fs.mkdirs(hFileFamilyPath); + try (HFile.Writer writer = HFile.getWriterFactoryNoCache(cluster.getConf()) + .withPath(fs, new Path(hFileFamilyPath, "hfile_" + UUID.randomUUID())) + .withFileContext(new HFileContextBuilder().withTableName(tableName.toBytes()) + .withColumnFamily(COLUMN_FAMILY).build()) + .create()) { + for (int i = 0; i < 10; i++) { + writer.append(new 
KeyValue(Bytes.toBytes(i), COLUMN_FAMILY, Bytes.toBytes("data"), + timestamp.toEpochMilli(), Bytes.toBytes(data))); + } + } + Map result = + BulkLoadHFiles.create(cluster.getConf()).bulkLoad(tableName, hFileRootPath); + assertFalse(result.isEmpty()); + } + + private String backup(BackupType backupType, List tables) throws IOException { + LOG.info("Creating the backup ..."); + + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + BackupRequest backupRequest = + new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString()) + .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build(); + return backupAdmin.backupTables(backupRequest); + } + } + + private void merge(String[] backupIds) throws IOException { + LOG.info("Merging the backups ..."); + + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + BackupAdmin backupAdmin = new BackupAdminImpl(connection)) { + backupAdmin.mergeBackups(backupIds); + } + } + + private void validateDataEquals(TableName tableName, String expectedData) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(cluster.getConf()); + Table table = connection.getTable(tableName)) { + Scan scan = new Scan(); + scan.readAllVersions(); + scan.setRaw(true); + scan.setBatch(100); + + for (Result sourceResult : table.getScanner(scan)) { + List sourceCells = sourceResult.listCells(); + for (Cell cell : sourceCells) { + assertEquals(expectedData, Bytes.toStringBinary(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } + } + } + } + +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 1fdcf4bcfd44..1bbbe513f738 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.OptionalLong; import org.apache.hadoop.conf.Configuration; @@ -152,19 +151,27 @@ protected List listStatus(JobContext job) throws IOException { List result = new ArrayList(); // Explode out directories that match the original FileInputFormat filters - // since HFiles are written to directories where the - // directory name is the column name + // Typically these are -level dirs, only requiring 1 level of recursion to + // get the -level dirs where the HFile are written, but in some cases + // -level dirs are provided requiring 2 levels of recursion. 
for (FileStatus status : super.listStatus(job)) { - if (status.isDirectory()) { - FileSystem fs = status.getPath().getFileSystem(job.getConfiguration()); - Collections.addAll(result, fs.listStatus(status.getPath(), HIDDEN_FILE_FILTER)); - } else { - result.add(status); - } + addFilesRecursively(job, status, result); } return result; } + private static void addFilesRecursively(JobContext job, FileStatus status, + List result) throws IOException { + if (status.isDirectory()) { + FileSystem fs = status.getPath().getFileSystem(job.getConfiguration()); + for (FileStatus fileStatus : fs.listStatus(status.getPath(), HIDDEN_FILE_FILTER)) { + addFilesRecursively(job, fileStatus, result); + } + } else { + result.add(status); + } + } + @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 2fc4e304ee8c..f44940376f82 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -119,11 +119,6 @@ mockito-core test - - org.hamcrest - hamcrest-library - test - org.slf4j jcl-over-slf4j From c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 7 Apr 2024 16:43:50 +0800 Subject: [PATCH 311/514] HBASE-28457 Introduce a version field in file based tracker record (#5784) Signed-off-by: Wellington Chevreuil --- .../server/region/StoreFileTracker.proto | 1 + .../storefiletracker/StoreFileListFile.java | 62 ++++++++++++++----- .../TestStoreFileListFile.java | 17 +++++ 3 files changed, 64 insertions(+), 16 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto index 2a269ea4ac4e..001cb3ea233c 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto @@ -33,4 +33,5 @@ message StoreFileEntry { message StoreFileList { required uint64 timestamp = 1; repeated StoreFileEntry store_file = 2; + optional uint64 version = 3 [default = 1]; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java index 7a6938106d3a..b6287b076b3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver.storefiletracker; +import com.google.errorprone.annotations.RestrictedApi; import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.NavigableMap; @@ -59,19 +61,28 @@ * without error on partial bytes if you stop at some special points, but the return message will * have incorrect field value. We should try our best to prevent this happens because loading an * incorrect store file list file usually leads to data loss. + *
+ * To prevent failing silently while downgrading, where we may miss some newly introduced fields in + * {@link StoreFileList} which are necessary, we introduce a 'version' field in + * {@link StoreFileList}. If we find out that we are reading a {@link StoreFileList} with higher + * version, we will fail immediately and tell users that you need extra steps while downgrading, to + * prevent potential data loss. */ @InterfaceAudience.Private class StoreFileListFile { private static final Logger LOG = LoggerFactory.getLogger(StoreFileListFile.class); + // the current version for StoreFileList + static final long VERSION = 1; + static final String TRACK_FILE_DIR = ".filelist"; - private static final String TRACK_FILE_PREFIX = "f1"; + static final String TRACK_FILE_PREFIX = "f1"; private static final String TRACK_FILE_ROTATE_PREFIX = "f2"; - private static final char TRACK_FILE_SEPARATOR = '.'; + static final char TRACK_FILE_SEPARATOR = '.'; static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$"); @@ -114,7 +125,18 @@ static StoreFileList load(FileSystem fs, Path path) throws IOException { throw new IOException( "Checksum mismatch, expected " + expectedChecksum + ", actual " + calculatedChecksum); } - return StoreFileList.parseFrom(data); + StoreFileList storeFileList = StoreFileList.parseFrom(data); + if (storeFileList.getVersion() > VERSION) { + LOG.error( + "The loaded store file list is in version {}, which is higher than expected" + + " version {}. Stop loading to prevent potential data loss. This usually because your" + + " cluster is downgraded from a newer version. You need extra steps before downgrading," + + " like switching back to default store file tracker.", + storeFileList.getVersion(), VERSION); + throw new IOException("Higher store file list version detected, expected " + VERSION + + ", got " + storeFileList.getVersion()); + } + return storeFileList; } StoreFileList load(Path path) throws IOException { @@ -145,7 +167,7 @@ private NavigableMap> listFiles() throws IOException { if (statuses == null || statuses.length == 0) { return Collections.emptyNavigableMap(); } - TreeMap> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1)); + TreeMap> map = new TreeMap<>(Comparator.reverseOrder()); for (FileStatus status : statuses) { Path file = status.getPath(); if (!status.isFile()) { @@ -232,8 +254,23 @@ StoreFileList load(boolean readOnly) throws IOException { return lists[winnerIndex]; } + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/StoreFileListFile.java|.*/src/test/.*") + static void write(FileSystem fs, Path file, StoreFileList storeFileList) throws IOException { + byte[] data = storeFileList.toByteArray(); + CRC32 crc32 = new CRC32(); + crc32.update(data); + int checksum = (int) crc32.getValue(); + // 4 bytes length at the beginning, plus 4 bytes checksum + try (FSDataOutputStream out = fs.create(file, true)) { + out.writeInt(data.length); + out.write(data); + out.writeInt(checksum); + } + } + /** - * We will set the timestamp in this method so just pass the builder in + * We will set the timestamp and version in this method so just pass the builder in */ void update(StoreFileList.Builder builder) throws IOException { if (nextTrackFile < 0) { @@ -241,22 +278,15 @@ void update(StoreFileList.Builder builder) throws IOException { // we are already in the update method, which is not read only, so pass false load(false); } - long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime()); - byte[] 
actualData = builder.setTimestamp(timestamp).build().toByteArray(); - CRC32 crc32 = new CRC32(); - crc32.update(actualData); - int checksum = (int) crc32.getValue(); - // 4 bytes length at the beginning, plus 4 bytes checksum FileSystem fs = ctx.getRegionFileSystem().getFileSystem(); - try (FSDataOutputStream out = fs.create(trackFiles[nextTrackFile], true)) { - out.writeInt(actualData.length); - out.write(actualData); - out.writeInt(checksum); - } + long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime()); + write(fs, trackFiles[nextTrackFile], + builder.setTimestamp(timestamp).setVersion(VERSION).build()); // record timestamp prevTimestamp = timestamp; // rotate the file nextTrackFile = 1 - nextTrackFile; + try { fs.delete(trackFiles[nextTrackFile], false); } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java index c3d876ec0142..f1fcb924f899 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.Before; import org.junit.ClassRule; @@ -222,4 +223,20 @@ public void testConcurrentUpdate() throws IOException { assertEquals("hehe", entry.getName()); assertEquals(10, entry.getSize()); } + + @Test + public void testLoadHigherVersion() throws IOException { + // write a fake StoreFileList file with higher version + StoreFileList storeFileList = + StoreFileList.newBuilder().setVersion(StoreFileListFile.VERSION + 1) + .setTimestamp(EnvironmentEdgeManager.currentTime()).build(); + Path trackFileDir = new Path(testDir, StoreFileListFile.TRACK_FILE_DIR); + StoreFileListFile.write(FileSystem.get(UTIL.getConfiguration()), + new Path(trackFileDir, StoreFileListFile.TRACK_FILE_PREFIX + + StoreFileListFile.TRACK_FILE_SEPARATOR + EnvironmentEdgeManager.currentTime()), + storeFileList); + IOException error = assertThrows(IOException.class, () -> create().load(false)); + assertEquals("Higher store file list version detected, expected " + StoreFileListFile.VERSION + + ", got " + (StoreFileListFile.VERSION + 1), error.getMessage()); + } } From e5d59cadc5dc9ac7d6f15555be1a8defc05862ad Mon Sep 17 00:00:00 2001 From: guluo Date: Sun, 7 Apr 2024 17:27:28 +0800 Subject: [PATCH 312/514] HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789) Signed-off-by: Duo Zhang Reviewed-by: Vineet Kumar Maheshwari --- .../hbase/client/MutableRegionInfo.java | 2 +- .../procedure/CreateTableProcedure.java | 14 +++++++-- .../procedure/TestCreateTableProcedure.java | 29 +++++++++++++++++++ 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index a9382f3a9bed..4217201b85e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -96,7 +96,7 @@ private static TableName checkTableName(TableName tableName) { private static int checkReplicaId(int regionId) { if (regionId > MAX_REPLICA_ID) { - throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID); + throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID); } return regionId; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 533b6fffcc43..17998fec7bd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -56,6 +56,8 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure { private static final Logger LOG = LoggerFactory.getLogger(CreateTableProcedure.class); + private static final int MAX_REGION_REPLICATION = 0x10000; + private TableDescriptor tableDescriptor; private List newRegions; @@ -84,10 +86,10 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS switch (state) { case CREATE_TABLE_PRE_OPERATION: // Verify if we can create the table - boolean exists = !prepareCreate(env); + boolean success = prepareCreate(env); releaseSyncLatch(); - if (exists) { + if (!success) { assert isFailed() : "the delete should have an exception here"; return Flow.NO_MORE_STATE; } @@ -262,6 +264,14 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException { "Table " + getTableName().toString() + " should have at least one column family.")); return false; } + + int regionReplicationCount = tableDescriptor.getRegionReplication(); + if (regionReplicationCount > MAX_REGION_REPLICATION) { + setFailure("master-create-table", new IllegalArgumentException( + "Region Replication cannot exceed " + MAX_REGION_REPLICATION + ".")); + return false; + } + if (!tableName.isSystemTable()) { // do not check rs group for system tables as we may block the bootstrap. 
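Illustrative sketch, not part of the patch: the guard above ties table creation to the replica-id space, since replica ids start at 0 for the primary and are bounded by MAX_REPLICA_ID in MutableRegionInfo (assumed here to be 0xFFFF, which would make 0x10000 the largest usable region replication). The class and method names below are invented for the example; only the constant value and the error message mirror the patch.

public class RegionReplicationCapSketch {
  static final int MAX_REPLICA_ID = 0xFFFF;                     // assumed replica-id upper bound
  static final int MAX_REGION_REPLICATION = MAX_REPLICA_ID + 1; // 0x10000, as in the patch

  static boolean checkRegionReplication(int regionReplication) {
    // Mirrors the new prepareCreate() guard: reject the request up front instead of
    // failing later when replica ids beyond MAX_REPLICA_ID would have to be created.
    if (regionReplication > MAX_REGION_REPLICATION) {
      throw new IllegalArgumentException(
        "Region Replication cannot exceed " + MAX_REGION_REPLICATION + ".");
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(checkRegionReplication(3));   // typical value, passes
    try {
      checkRegionReplication(0x10001);               // one past the cap, rejected
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}

Failing fast here means the procedure never leaves a half-created table behind, which is what the new testCreateTableWithManyRegionReplication below exercises.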
Supplier forWhom = () -> "table " + tableName; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index 618a4a45a044..bed41f4da86c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -284,4 +285,32 @@ public void testOnHDFSFailure() throws Exception { new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, regions)); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); } + + @Test + public void testCreateTableWithManyRegionReplication() throws IOException { + final int EXCEED_MAX_REGION_REPLICATION = 0x10001; + TableName tableName = TableName.valueOf(name.getMethodName()); + ProcedureExecutor procExec = getMasterProcedureExecutor(); + + TableDescriptor tableWithManyRegionReplication = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build(); + RegionInfo[] regions01 = + ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, null); + long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new CreateTableProcedure( + procExec.getEnvironment(), tableWithManyRegionReplication, regions01)); + Procedure result01 = procExec.getResult(procId01); + assertTrue(result01.getException().getCause() instanceof IllegalArgumentException); + assertFalse(UTIL.getAdmin().tableExists(tableName)); + + TableDescriptor tdesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); + RegionInfo[] regions02 = ModifyRegionUtils.createRegionInfos(tdesc, null); + long procId02 = ProcedureTestingUtility.submitAndWait(procExec, + new CreateTableProcedure(procExec.getEnvironment(), tdesc, regions02)); + Procedure result02 = procExec.getResult(procId02); + assertTrue(result02.isSuccess()); + assertTrue(UTIL.getAdmin().tableExists(tableName)); + } } From eeebbdfa723dd49aeaf4a6bc061382752002c5a6 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 7 Apr 2024 18:40:13 +0800 Subject: [PATCH 313/514] HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788) Signed-off-by: Bryan Beaudreault --- .../hbase/io/hfile/FixedFileTrailer.java | 42 +--------- .../hbase/io/hfile/TestFixedFileTrailer.java | 76 ++++++------------- 2 files changed, 27 insertions(+), 91 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 2a405197a480..eaf79f311038 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -26,7 +26,6 @@ import java.nio.ByteBuffer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hbase.CellComparator; -import 
org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.InnerStoreCellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaCellComparator; @@ -206,8 +205,7 @@ HFileProtos.FileTrailerProto toProtobuf() { .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount) .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount) .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset) - .setLastDataBlockOffset(lastDataBlockOffset) - .setComparatorClassName(getHBase1CompatibleName(comparatorClassName)) + .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName) .setCompressionCodec(compressionCodec.ordinal()); if (encryptionKey != null) { builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey)); @@ -216,8 +214,7 @@ HFileProtos.FileTrailerProto toProtobuf() { } /** - * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will - * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}. + * Write trailer data as protobuf. */ void serializeAsPB(DataOutputStream output) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -553,41 +550,6 @@ public void setComparatorClass(Class klass) { } } - /** - * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than - * the new name; writing the new name will make it so newly-written hfiles are not parseable by - * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters - * reading hbase-2.x produce. - *
<p>
- * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In - * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that - * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to - * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3). - * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we - * do the translation. {@link #getComparatorClass(String)} does translation going the other way. - *
<p>
- * The translation is done on the serialized Protobuf only. - *
<p>
- * @param comparator String class name of the Comparator used in this hfile. - * @return What to store in the trailer as our comparator name. - * @see #getComparatorClass(String) - * @since hbase-2.0.0. - * @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0. - */ - @Deprecated - private String getHBase1CompatibleName(final String comparator) { - if ( - comparator.equals(CellComparatorImpl.class.getName()) - || comparator.equals(InnerStoreCellComparator.class.getName()) - ) { - return KeyValue.COMPARATOR.getClass().getName(); - } - if (comparator.equals(MetaCellComparator.class.getName())) { - return KeyValue.META_COMPARATOR.getClass().getName(); - } - return comparator; - } - @SuppressWarnings("unchecked") private static Class getComparatorClass(String comparatorClassName) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 442f62e505db..3bad8d46a149 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hbase.io.hfile; +import static org.apache.hadoop.hbase.io.hfile.FixedFileTrailer.createComparator; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -46,18 +48,14 @@ import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; - @RunWith(Parameterized.class) @Category({ IOTests.class, SmallTests.class }) public class TestFixedFileTrailer { @@ -88,9 +86,6 @@ public TestFixedFileTrailer(int version) { this.version = version; } - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - @Parameters public static Collection getParameters() { List versionsToTest = new ArrayList<>(); @@ -104,54 +99,33 @@ public void setUp() throws IOException { fs = FileSystem.get(util.getConfiguration()); } - @Test - public void testComparatorIsHBase1Compatible() { - FixedFileTrailer t = new FixedFileTrailer(version, HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); - t.setComparatorClass(CellComparatorImpl.COMPARATOR.getClass()); - assertEquals(CellComparatorImpl.COMPARATOR.getClass().getName(), t.getComparatorClassName()); - HFileProtos.FileTrailerProto pb = t.toProtobuf(); - assertEquals(KeyValue.COMPARATOR.getClass().getName(), pb.getComparatorClassName()); - t.setComparatorClass(MetaCellComparator.META_COMPARATOR.getClass()); - pb = t.toProtobuf(); - assertEquals(KeyValue.META_COMPARATOR.getClass().getName(), pb.getComparatorClassName()); - } - @Test public void testCreateComparator() throws IOException { - FixedFileTrailer t = new FixedFileTrailer(version, HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); - try { - assertEquals(InnerStoreCellComparator.class, - t.createComparator(KeyValue.COMPARATOR.getLegacyKeyComparatorName()).getClass()); - 
assertEquals(InnerStoreCellComparator.class, - t.createComparator(KeyValue.COMPARATOR.getClass().getName()).getClass()); - assertEquals(InnerStoreCellComparator.class, - t.createComparator(CellComparator.class.getName()).getClass()); - assertEquals(MetaCellComparator.class, - t.createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); - assertEquals(MetaCellComparator.class, - t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); - assertEquals(MetaCellComparator.class, - t.createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); - assertEquals(MetaCellComparator.class, - t.createComparator("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator") - .getClass()); - assertEquals(MetaCellComparator.class, - t.createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); - assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), - t.createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); - assertEquals(CellComparatorImpl.COMPARATOR.getClass(), - t.createComparator(MetaCellComparator.COMPARATOR.getClass().getName()).getClass()); - assertNull(t.createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName())); - assertNull(t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")); - } catch (IOException e) { - fail("Unexpected exception while testing FixedFileTrailer#createComparator(), " - + e.getMessage()); - } + assertEquals(InnerStoreCellComparator.class, + createComparator(KeyValue.COMPARATOR.getLegacyKeyComparatorName()).getClass()); + assertEquals(InnerStoreCellComparator.class, + createComparator(KeyValue.COMPARATOR.getClass().getName()).getClass()); + assertEquals(InnerStoreCellComparator.class, + createComparator(CellComparator.class.getName()).getClass()); + assertEquals(MetaCellComparator.class, + createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); + assertEquals(MetaCellComparator.class, + createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.class, + createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); + assertEquals(MetaCellComparator.class, + createComparator("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator").getClass()); + assertEquals(MetaCellComparator.class, + createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), + createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(CellComparatorImpl.COMPARATOR.getClass(), + createComparator(MetaCellComparator.COMPARATOR.getClass().getName()).getClass()); + assertNull(createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName())); + assertNull(createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")); // Test an invalid comparatorClassName - expectedEx.expect(IOException.class); - t.createComparator(""); - + assertThrows(IOException.class, () -> createComparator("")); } @Test From 3340d8dd07eba18b71dcc44cd1fa7633ac630a5e Mon Sep 17 00:00:00 2001 From: chandrasekhar-188k <154109917+chandrasekhar-188k@users.noreply.github.com> Date: Sun, 7 Apr 2024 18:44:06 +0530 Subject: [PATCH 314/514] HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691) Signed-off-by: Bryan Beaudreault Signed-off-by: Pankaj Kumar --- .../apache/hadoop/hbase/master/HMaster.java 
| 24 +++++++++++-------- .../hadoop/hbase/quotas/TestQuotaAdmin.java | 11 +++++++++ 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index ddef3e27b405..0dc5b61cba8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2728,16 +2728,20 @@ protected void run() throws IOException { MasterQuotaManager quotaManager = getMasterQuotaManager(); if (quotaManager != null) { if (quotaManager.isQuotaInitialized()) { - SpaceQuotaSnapshot currSnapshotOfTable = - QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); - if (currSnapshotOfTable != null) { - SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); - if ( - quotaStatus.isInViolation() - && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null) - ) { - throw new AccessDeniedException("Enabling the table '" + tableName - + "' is disallowed due to a violated space quota."); + // skip checking quotas for system tables, see: + // https://issues.apache.org/jira/browse/HBASE-28183 + if (!tableName.isSystemTable()) { + SpaceQuotaSnapshot currSnapshotOfTable = + QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); + if (currSnapshotOfTable != null) { + SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); + if ( + quotaStatus.isInViolation() + && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null) + ) { + throw new AccessDeniedException("Enabling the table '" + tableName + + "' is disallowed due to a violated space quota."); + } } } } else if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java index c577e9aceace..817f135f0c95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java @@ -994,4 +994,15 @@ public int getQuotaSettingCount(Admin admin) throws IOException { } return quotaSettingCount; } + + @Test + public void testQuotaTableDisableAndEnable() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME); + try { + admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME); + } catch (Exception ex) { + fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME); + } + } } From bf836a980736a43d7b0f05e6c24de7122fa7b530 Mon Sep 17 00:00:00 2001 From: Monani Mihir Date: Mon, 8 Apr 2024 14:22:53 -0700 Subject: [PATCH 315/514] HBASE-26192 Master UI hbck should provide a JSON formatted output option (#5772) Signed-off-by: Andrew Purtell --- .../hadoop/hbase/HbckEmptyRegionInfo.java | 38 ++ .../hadoop/hbase/HbckInconsistentRegions.java | 51 +++ .../hadoop/hbase/HbckOrphanRegionsOnFS.java | 43 ++ .../hadoop/hbase/HbckOrphanRegionsOnRS.java | 43 ++ .../hadoop/hbase/HbckOverlapRegions.java | 44 ++ .../hadoop/hbase/HbckRegionDetails.java | 54 +++ .../apache/hadoop/hbase/HbckRegionHoles.java | 44 ++ .../apache/hadoop/hbase/HbckServerName.java | 48 ++ .../hadoop/hbase/HbckUnknownServers.java | 44 ++ .../apache/hadoop/hbase/master/HMaster.java | 24 + .../hadoop/hbase/master/MasterServices.java | 4 + .../master/http/hbck/HbckConfigFactory.java | 54 +++ 
.../master/http/hbck/model/HbckMetrics.java | 98 ++++ .../hbck/resource/HbckMetricsResource.java | 140 ++++++ .../hbase/master/MockNoopMasterServices.java | 6 + .../master/http/TestHbckMetricsResource.java | 422 ++++++++++++++++++ 16 files changed, 1157 insertions(+) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckEmptyRegionInfo.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckInconsistentRegions.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnFS.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnRS.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOverlapRegions.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionDetails.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionHoles.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckServerName.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HbckUnknownServers.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/HbckConfigFactory.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/model/HbckMetrics.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/resource/HbckMetricsResource.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestHbckMetricsResource.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckEmptyRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckEmptyRegionInfo.java new file mode 100644 index 000000000000..5d1ca54bf1be --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckEmptyRegionInfo.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Empty Region Info from Catalog Janitor Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of Catalog Janitor + * inconsistencies. 
+ */ +@InterfaceAudience.Public +public class HbckEmptyRegionInfo { + private final String regionInfo; + + public HbckEmptyRegionInfo(String emptyRegionInfo) { + this.regionInfo = emptyRegionInfo; + } + + public String getRegionInfo() { + return regionInfo; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckInconsistentRegions.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckInconsistentRegions.java new file mode 100644 index 000000000000..f32f73a73d15 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckInconsistentRegions.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present HBCK Inconsistent Regions from HBCK Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of HBCK inconsistencies. + */ +@InterfaceAudience.Public +public class HbckInconsistentRegions { + private final String regionId; + private final HbckServerName serverNameInMeta; + private final List listOfServers; + + public HbckInconsistentRegions(String inconsistentRegionId, HbckServerName serverNameInMeta, + List listOfServerName) { + this.regionId = inconsistentRegionId; + this.serverNameInMeta = serverNameInMeta; + this.listOfServers = listOfServerName; + } + + public String getRegionId() { + return regionId; + } + + public HbckServerName getServerNameInMeta() { + return serverNameInMeta; + } + + public List getListOfServers() { + return listOfServers; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnFS.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnFS.java new file mode 100644 index 000000000000..43a045fb2933 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnFS.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Orphan Region on FS from HBCK Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of HBCK Inconsistencies. + */ +@InterfaceAudience.Public +public class HbckOrphanRegionsOnFS { + private final String regionId; + private final String regionHdfsPath; + + public HbckOrphanRegionsOnFS(String regionId, String orphanRegionHdfsPath) { + this.regionId = regionId; + this.regionHdfsPath = orphanRegionHdfsPath; + } + + public String getRegionId() { + return regionId; + } + + public String getRegionHdfsPath() { + return regionHdfsPath; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnRS.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnRS.java new file mode 100644 index 000000000000..2d442b7a9e40 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOrphanRegionsOnRS.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Orphan Region on RS from HBCK Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of HBCK Inconsistencies. + */ +@InterfaceAudience.Public +public class HbckOrphanRegionsOnRS { + private final String regionId; + private final HbckServerName rsName; + + public HbckOrphanRegionsOnRS(String orphanRegionId, HbckServerName orphanRegionRsName) { + this.regionId = orphanRegionId; + this.rsName = orphanRegionRsName; + } + + public String getRegionId() { + return regionId; + } + + public HbckServerName getRsName() { + return rsName; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOverlapRegions.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOverlapRegions.java new file mode 100644 index 000000000000..4170932bf563 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckOverlapRegions.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Region Overlap from Catalog Janitor Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of Catalog Janitor + * inconsistencies. + */ +@InterfaceAudience.Public +public class HbckOverlapRegions { + private final HbckRegionDetails region1Info; + private final HbckRegionDetails region2Info; + + public HbckOverlapRegions(HbckRegionDetails region1Info, HbckRegionDetails region2Info) { + this.region1Info = region1Info; + this.region2Info = region2Info; + } + + public HbckRegionDetails getRegion1Info() { + return region1Info; + } + + public HbckRegionDetails getRegion2Info() { + return region2Info; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionDetails.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionDetails.java new file mode 100644 index 000000000000..a79245636276 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionDetails.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO class for HBCK RegionInfo in HBCK Inconsistencies report. 
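Illustrative sketch, not part of the patch: HbckRegionDetails (shown just below) is the flattened view of a RegionInfo that the JSON endpoints return, and the mapping mirrors the parseRegionInfo helper added later in this series in HbckMetricsResource. The table name and keys are made up for the example.

import org.apache.hadoop.hbase.HbckRegionDetails;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HbckRegionDetailsSketch {
  // Flatten a RegionInfo the way HbckMetricsResource#parseRegionInfo does: encoded name,
  // table name, and the start/end keys rendered as strings.
  static HbckRegionDetails toDetails(RegionInfo regionInfo) {
    return new HbckRegionDetails(regionInfo.getEncodedName(),
      regionInfo.getTable().getNameAsString(), new String(regionInfo.getStartKey()),
      new String(regionInfo.getEndKey()));
  }

  public static void main(String[] args) {
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")) // hypothetical table
      .setStartKey(Bytes.toBytes("aaa")).setEndKey(Bytes.toBytes("bbb")).build();
    HbckRegionDetails details = toDetails(regionInfo);
    System.out.println(details.getRegionId() + " " + details.getTableName() + " ["
      + details.getStartKey() + ", " + details.getEndKey() + ")");
  }
}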
+ */ +@InterfaceAudience.Public +public class HbckRegionDetails { + private final String regionId; + private final String tableName; + private final String startKey; + private final String endKey; + + public HbckRegionDetails(String regionId, String tableName, String startKey, String endKey) { + this.regionId = regionId; + this.tableName = tableName; + this.startKey = startKey; + this.endKey = endKey; + } + + public String getRegionId() { + return regionId; + } + + public String getTableName() { + return tableName; + } + + public String getStartKey() { + return startKey; + } + + public String getEndKey() { + return endKey; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionHoles.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionHoles.java new file mode 100644 index 000000000000..643e014735a0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckRegionHoles.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Region Holes from Catalog Janitor Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of Catalog Janitor + * inconsistencies. + */ +@InterfaceAudience.Public +public class HbckRegionHoles { + private final HbckRegionDetails region1Info; + private final HbckRegionDetails region2Info; + + public HbckRegionHoles(HbckRegionDetails region1Info, HbckRegionDetails region2Info) { + this.region1Info = region1Info; + this.region2Info = region2Info; + } + + public HbckRegionDetails getRegion1Info() { + return region1Info; + } + + public HbckRegionDetails getRegion2Info() { + return region2Info; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckServerName.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckServerName.java new file mode 100644 index 000000000000..2c6b899fb15c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckServerName.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO class for ServerName in HBCK Inconsistencies report. + */ +@InterfaceAudience.Public +public class HbckServerName { + private final String hostName; + private final int hostPort; + private final long startCode; + + public HbckServerName(String hostName, int hostPort, long startCode) { + this.hostName = hostName; + this.hostPort = hostPort; + this.startCode = startCode; + } + + public String getHostName() { + return hostName; + } + + public int getHostPort() { + return hostPort; + } + + public long getStartCode() { + return startCode; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckUnknownServers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckUnknownServers.java new file mode 100644 index 000000000000..c070f84e69fe --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HbckUnknownServers.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * POJO to present Unknown Regions from Catalog Janitor Inconsistencies Report via REST API. These + * inconsistencies are shown on hbck.jsp page on Active HMaster UI as part of Catalog Janitor + * inconsistencies. 
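Illustrative sketch, not part of the patch: these carrier classes become JSON through the master's Gson-based serialization. The snake_case keys asserted later in TestHbckMetricsResource ("host_name", "host_port", "start_code") suggest a LOWER_CASE_WITH_UNDERSCORES field naming policy, which is an assumption here; plain (unshaded) Gson reproduces the shape for a quick local check.

import com.google.gson.FieldNamingPolicy;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.hadoop.hbase.HbckServerName;

public class HbckPojoJsonSketch {
  public static void main(String[] args) {
    // Field naming policy is an assumption about the master's GsonSerializationFeature,
    // inferred from the snake_case keys asserted in TestHbckMetricsResource.
    Gson gson = new GsonBuilder()
      .setFieldNamingPolicy(FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES).create();
    HbckServerName server = new HbckServerName("localhost1", 16010, 123456789L);
    // Prints {"host_name":"localhost1","host_port":16010,"start_code":123456789}
    System.out.println(gson.toJson(server));
  }
}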
+ */ +@InterfaceAudience.Public +public class HbckUnknownServers { + private final HbckRegionDetails regionInfo; + private final HbckServerName serverName; + + public HbckUnknownServers(HbckRegionDetails regionInfo, HbckServerName unknownServerName) { + this.regionInfo = regionInfo; + this.serverName = unknownServerName; + } + + public HbckRegionDetails getRegionInfo() { + return regionInfo; + } + + public HbckServerName getServerName() { + return serverName; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0dc5b61cba8c..0f4162cd1f74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -146,6 +146,7 @@ import org.apache.hadoop.hbase.master.http.MasterRedirectServlet; import org.apache.hadoop.hbase.master.http.MasterStatusServlet; import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory; +import org.apache.hadoop.hbase.master.http.hbck.HbckConfigFactory; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore; @@ -760,6 +761,7 @@ protected MasterRpcServices createRpcServices() throws IOException { protected void configureInfoServer(InfoServer infoServer) { infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class); infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet()); + infoServer.addUnprivilegedServlet("hbck", "/hbck/*", buildHbckServlet()); infoServer.setAttribute(MASTER, this); } @@ -769,6 +771,11 @@ private ServletHolder buildApiV1Servlet() { return new ServletHolder(new ServletContainer(config)); } + private ServletHolder buildHbckServlet() { + final ResourceConfig config = HbckConfigFactory.createResourceConfig(conf, this); + return new ServletHolder(new ServletContainer(config)); + } + @Override protected Class getDumpServlet() { return MasterDumpServlet.class; @@ -1377,6 +1384,22 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE status.markComplete("Progress after master initialized complete"); } + /** + * Used for testing only to set Mock objects. + * @param hbckChore hbckChore + */ + public void setHbckChoreForTesting(HbckChore hbckChore) { + this.hbckChore = hbckChore; + } + + /** + * Used for testing only to set Mock objects. 
+ * @param catalogJanitorChore catalogJanitorChore + */ + public void setCatalogJanitorChoreForTesting(CatalogJanitor catalogJanitorChore) { + this.catalogJanitorChore = catalogJanitorChore; + } + private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) throws IOException { TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) @@ -4257,6 +4280,7 @@ public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() { return this.syncReplicationReplayWALManager; } + @Override public HbckChore getHbckChore() { return this.hbckChore; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index b5e25bb44f33..3aa5c2df751b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; @@ -106,6 +107,9 @@ public interface MasterServices extends Server { /** Returns Master's instance of {@link CatalogJanitor} */ CatalogJanitor getCatalogJanitor(); + /** Returns Master's instance of {@link HbckChore} */ + HbckChore getHbckChore(); + /** Returns Master's instance of {@link ProcedureExecutor} */ ProcedureExecutor getMasterProcedureExecutor(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/HbckConfigFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/HbckConfigFactory.java new file mode 100644 index 000000000000..32dfd4a23b9c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/HbckConfigFactory.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.http.hbck; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.http.jersey.ResponseEntityMapper; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.http.gson.GsonSerializationFeature; +import org.apache.hadoop.hbase.master.http.jersey.MasterFeature; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; +import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ServerProperties; +import org.apache.hbase.thirdparty.org.glassfish.jersey.server.TracingConfig; + +@InterfaceAudience.Private +public final class HbckConfigFactory { + private HbckConfigFactory() { + } + + public static ResourceConfig createResourceConfig(Configuration conf, HMaster master) { + return new ResourceConfig().setApplicationName("hbck") + .packages(HbckConfigFactory.class.getPackage().getName()) + // TODO: anything registered here that does not have necessary bindings won't inject properly + // at annotation sites and will result in a WARN logged by o.a.h.t.o.g.j.i.inject.Providers. + // These warnings should be treated by the service as fatal errors, but I have not found a + // callback API for registering a failed binding handler. + .register(ResponseEntityMapper.class).register(GsonSerializationFeature.class) + .register(new MasterFeature(master)) + + // devs: enable TRACING to see how jersey is dispatching to resources. + // in hbase-site.xml, set 'hbase.http.jersey.tracing.type=ON_DEMAND` and + // to curl, add `-H X-Jersey-Tracing-Accept:true` + .property(ServerProperties.TRACING, + conf.get("hbase.http.jersey.tracing.type", TracingConfig.OFF.name())) + .property(ServerProperties.TRACING_THRESHOLD, + conf.get("hbase.http.jersey.tracing.threshold", "TRACE")); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/model/HbckMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/model/HbckMetrics.java new file mode 100644 index 000000000000..ba4cfecdcf4c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/model/HbckMetrics.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.http.hbck.model; + +import java.util.List; +import org.apache.hadoop.hbase.HbckEmptyRegionInfo; +import org.apache.hadoop.hbase.HbckInconsistentRegions; +import org.apache.hadoop.hbase.HbckOrphanRegionsOnFS; +import org.apache.hadoop.hbase.HbckOrphanRegionsOnRS; +import org.apache.hadoop.hbase.HbckOverlapRegions; +import org.apache.hadoop.hbase.HbckRegionHoles; +import org.apache.hadoop.hbase.HbckUnknownServers; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This class exposes hbck.jsp report as JSON Output via /hbck/hbck-metrics API. + */ +@InterfaceAudience.Private +public class HbckMetrics { + + private final long hbckReportStartTime; + private final long hbckReportEndTime; + private final List hbckOrphanRegionsOnFs; + private final List hbckOrphanRegionsOnRs; + private final List hbckInconsistentRegions; + private final List hbckHoles; + private final List hbckOverlaps; + private final List hbckUnknownServers; + private final List hbckEmptyRegionInfo; + + public HbckMetrics(long hbckReportStartTime, long hbckReportEndTime, + List hbckOrphanRegionsOnFs, + List hbckOrphanRegionsOnRs, + List hbckInconsistentRegions, List hbckHoles, + List hbckOverlaps, List hbckUnknownServers, + List hbckEmptyRegionInfo) { + this.hbckReportStartTime = hbckReportStartTime; + this.hbckReportEndTime = hbckReportEndTime; + this.hbckOrphanRegionsOnFs = hbckOrphanRegionsOnFs; + this.hbckOrphanRegionsOnRs = hbckOrphanRegionsOnRs; + this.hbckInconsistentRegions = hbckInconsistentRegions; + this.hbckHoles = hbckHoles; + this.hbckOverlaps = hbckOverlaps; + this.hbckUnknownServers = hbckUnknownServers; + this.hbckEmptyRegionInfo = hbckEmptyRegionInfo; + } + + public long gethbckReportStartTime() { + return hbckReportStartTime; + } + + public long gethbckReportEndTime() { + return hbckReportEndTime; + } + + public List gethbckOrphanRegionsOnFs() { + return hbckOrphanRegionsOnFs; + } + + public List gethbckOrphanRegionsOnRs() { + return hbckOrphanRegionsOnRs; + } + + public List gethbckInconsistentRegions() { + return hbckInconsistentRegions; + } + + public List gethbckHoles() { + return hbckHoles; + } + + public List gethbckOverlaps() { + return hbckOverlaps; + } + + public List gethbckUnknownServers() { + return hbckUnknownServers; + } + + public List gethbckEmptyRegionInfo() { + return hbckEmptyRegionInfo; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/resource/HbckMetricsResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/resource/HbckMetricsResource.java new file mode 100644 index 000000000000..96924aa126d3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/hbck/resource/HbckMetricsResource.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.http.hbck.resource; + +import java.util.List; +import java.util.stream.Collectors; +import javax.inject.Inject; +import org.apache.hadoop.hbase.HbckEmptyRegionInfo; +import org.apache.hadoop.hbase.HbckInconsistentRegions; +import org.apache.hadoop.hbase.HbckOrphanRegionsOnFS; +import org.apache.hadoop.hbase.HbckOrphanRegionsOnRS; +import org.apache.hadoop.hbase.HbckOverlapRegions; +import org.apache.hadoop.hbase.HbckRegionDetails; +import org.apache.hadoop.hbase.HbckRegionHoles; +import org.apache.hadoop.hbase.HbckServerName; +import org.apache.hadoop.hbase.HbckUnknownServers; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.hbck.HbckReport; +import org.apache.hadoop.hbase.master.http.hbck.model.HbckMetrics; +import org.apache.hadoop.hbase.master.janitor.CatalogJanitorReport; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.javax.ws.rs.GET; +import org.apache.hbase.thirdparty.javax.ws.rs.Path; +import org.apache.hbase.thirdparty.javax.ws.rs.Produces; +import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; + +/** + * The root object exposing hbck.jsp page as JSON Output. + */ +@Path("hbck-metrics") +@Produces({ MediaType.APPLICATION_JSON }) +@InterfaceAudience.Private +public class HbckMetricsResource { + private final HbckReport hbckReport; + private final CatalogJanitorReport catalogJanitorReport; + + @Inject + public HbckMetricsResource(MasterServices master) { + this.hbckReport = master.getHbckChore().getLastReport(); + this.catalogJanitorReport = master.getCatalogJanitor().getLastReport(); + } + + @GET + public HbckMetrics getBaseHbckMetrics() { + return new HbckMetrics(hbckReport.getCheckingStartTimestamp().toEpochMilli(), + hbckReport.getCheckingEndTimestamp().toEpochMilli(), getOrphanRegionsOnFS(), + getOrphanRegionsOnRS(), getInconsistentRegions(), getRegionChainHoles(), + getRegionChainOverlap(), getUnknownServers(), getEmptyRegionInfo()); + } + + @GET + @Path("/orphan-regions-on-fs") + public List getOrphanRegionsOnFS() { + return hbckReport.getOrphanRegionsOnFS().entrySet().stream() + .map(obj1 -> new HbckOrphanRegionsOnFS(obj1.getKey(), obj1.getValue().toString())) + .collect(Collectors.toList()); + } + + @GET + @Path("/orphan-regions-on-rs") + public List getOrphanRegionsOnRS() { + return hbckReport.getOrphanRegionsOnRS().entrySet().stream() + .map(obj1 -> new HbckOrphanRegionsOnRS(obj1.getKey(), parseServerName(obj1.getValue()))) + .collect(Collectors.toList()); + } + + @GET + @Path("/inconsistent-regions") + public List getInconsistentRegions() { + return hbckReport.getInconsistentRegions().entrySet().stream() + .map(obj1 -> new HbckInconsistentRegions(obj1.getKey(), + parseServerName(obj1.getValue().getFirst()), obj1.getValue().getSecond().stream() + .map(this::parseServerName).collect(Collectors.toList()))) + .collect(Collectors.toList()); + } + + @GET + @Path("/region-holes") + public List getRegionChainHoles() { + return catalogJanitorReport.getHoles().stream() + .map(obj1 -> new HbckRegionHoles(parseRegionInfo(obj1.getFirst()), + parseRegionInfo(obj1.getSecond()))) + .collect(Collectors.toList()); + } + + @GET + @Path("/region-overlaps") + public List getRegionChainOverlap() { + return 
catalogJanitorReport.getOverlaps().stream() + .map(obj1 -> new HbckOverlapRegions(parseRegionInfo(obj1.getFirst()), + parseRegionInfo(obj1.getSecond()))) + .collect(Collectors.toList()); + } + + @GET + @Path("/unknown-servers") + public List getUnknownServers() { + return catalogJanitorReport.getUnknownServers().stream() + .map(obj1 -> new HbckUnknownServers(parseRegionInfo(obj1.getFirst()), + parseServerName(obj1.getSecond()))) + .collect(Collectors.toList()); + } + + @GET + @Path("/empty-regioninfo") + public List getEmptyRegionInfo() { + return catalogJanitorReport.getEmptyRegionInfo().stream() + .map(obj1 -> new HbckEmptyRegionInfo(Bytes.toString(obj1))).collect(Collectors.toList()); + } + + public HbckRegionDetails parseRegionInfo(RegionInfo regionInfo) { + return new HbckRegionDetails(regionInfo.getEncodedName(), + regionInfo.getTable().getNameAsString(), new String(regionInfo.getStartKey()), + new String(regionInfo.getEndKey())); + } + + public HbckServerName parseServerName(ServerName serverName) { + return new HbckServerName(serverName.getHostname(), serverName.getPort(), + serverName.getStartCode()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 0c4f3d7db266..3d4d63722e09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; @@ -117,6 +118,11 @@ public CatalogJanitor getCatalogJanitor() { return null; } + @Override + public HbckChore getHbckChore() { + return null; + } + @Override public MasterFileSystem getMasterFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestHbckMetricsResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestHbckMetricsResource.java new file mode 100644 index 000000000000..6ed5087ccbdb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestHbckMetricsResource.java @@ -0,0 +1,422 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.http; + +import static org.apache.hadoop.hbase.client.RegionInfoBuilder.FIRST_META_REGIONINFO; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.startsWith; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ConnectionRule; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MiniClusterRule; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AsyncAdmin; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.hbck.HbckChore; +import org.apache.hadoop.hbase.master.hbck.HbckReport; +import org.apache.hadoop.hbase.master.http.hbck.resource.HbckMetricsResource; +import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; +import org.apache.hadoop.hbase.master.janitor.CatalogJanitorReport; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.ExternalResource; +import org.junit.rules.RuleChain; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.javax.ws.rs.NotAcceptableException; +import org.apache.hbase.thirdparty.javax.ws.rs.client.Client; +import org.apache.hbase.thirdparty.javax.ws.rs.client.ClientBuilder; +import org.apache.hbase.thirdparty.javax.ws.rs.client.WebTarget; +import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; + +/** + * Tests for the {@link HbckMetricsResource}. 
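Illustrative sketch, not part of the patch: outside the Jersey test client used below, the same endpoints can be probed against a running master's info server. The host and port are assumptions, the sub-paths come from HbckMetricsResource above, the Accept header matches the resource's produced media type, and the optional tracing header follows the hint in HbckConfigFactory. Responses come back wrapped under a top-level "data" key, which is what the test's key constants below assert.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class HbckMetricsFetchSketch {
  public static void main(String[] args) throws Exception {
    // Host and port are assumptions; point this at an active master's info server.
    String base = args.length > 0 ? args[0] : "http://localhost:16010/hbck/hbck-metrics";
    for (String path : new String[] { "", "/orphan-regions-on-fs", "/empty-regioninfo" }) {
      HttpRequest request = HttpRequest.newBuilder().uri(URI.create(base + path))
        .header("Accept", "application/json")
        // Optional: request Jersey tracing when hbase.http.jersey.tracing.type=ON_DEMAND
        .header("X-Jersey-Tracing-Accept", "true").build();
      HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
      System.out.println(path + " -> " + response.body()); // body is wrapped as {"data": ...}
    }
  }
}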
+ */ +@Category({ MasterTests.class, LargeTests.class }) +public class TestHbckMetricsResource { + + private static final Logger LOG = LoggerFactory.getLogger(TestHbckMetricsResource.class); + + // Test data for Mock HBCK Report + private static final long reportStartTime = 123456789000L; + private static final long reportEndTime = 234567890000L; + private static final String regionId1 = "regionId1"; + private static final String regionId2 = "regionId2"; + private static final String localhost1 = "localhost1"; + private static final String localhost2 = "localhost2"; + private static final String port = "16010"; + private static final String hostStartCode = "123456789"; + private static final String path1 = "hdfs://path1"; + private static final String path2 = "hdfs://path2"; + private static final String metaRegionID = FIRST_META_REGIONINFO.getEncodedName(); + private static final String metaTableName = FIRST_META_REGIONINFO.getTable().getNameAsString(); + + // Various Keys in HBCK JSON Response. + private static final String quoteColon = "\":"; + private static final String quote = "\""; + private static final String regionId = quote + "region_id" + quoteColon; + private static final String regionHdfsPath = quote + "region_hdfs_path" + quoteColon; + private static final String rsName = quote + "rs_name" + quoteColon; + private static final String hostName = quote + "host_name" + quoteColon; + private static final String hostPort = quote + "host_port" + quoteColon; + private static final String startCode = quote + "start_code" + quoteColon; + private static final String serverNameInMeta = quote + "server_name_in_meta" + quoteColon; + private static final String listOfServers = quote + "list_of_servers" + quoteColon; + private static final String region1Info = quote + "region1_info" + quoteColon; + private static final String region2Info = quote + "region2_info" + quoteColon; + private static final String regionInfo = quote + "region_info" + quoteColon; + private static final String serverName = quote + "server_name" + quoteColon; + private static final String tableName = quote + "table_name" + quoteColon; + + private static final String dataStartsWith = "{\"data\":["; + private static final String dataEndsWith = "]}"; + private static final String hbckReportStartTime = quote + "hbck_report_start_time" + quoteColon; + private static final String hbckReportEndTime = quote + "hbck_report_end_time" + quoteColon; + private static final String hbckOrphanRegionOnFS = + quote + "hbck_orphan_regions_on_fs" + quoteColon; + private static final String hbckOrphanRegionOnRS = + quote + "hbck_orphan_regions_on_rs" + quoteColon; + private static final String hbckInconsistentRegion = + quote + "hbck_inconsistent_regions" + quoteColon; + private static final String hbckHoles = quote + "hbck_holes" + quoteColon; + private static final String hbckOverlaps = quote + "hbck_overlaps" + quoteColon; + private static final String hbckUnknownServers = quote + "hbck_unknown_servers" + quoteColon; + private static final String hbckEmptyRegionInfo = quote + "hbck_empty_region_info" + quoteColon; + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestHbckMetricsResource.class); + + private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder() + .setMiniClusterOption( + StartTestingClusterOption.builder().numZkServers(3).numMasters(3).numDataNodes(3).build()) + .setConfiguration(() -> { + // enable Master InfoServer and random port selection + final 
Configuration conf = HBaseConfiguration.create(); + conf.setInt(HConstants.MASTER_INFO_PORT, 0); + conf.set("hbase.http.jersey.tracing.type", "ON_DEMAND"); + return conf; + }).build(); + + private static final ConnectionRule connectionRule = + ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); + private static final ClassSetup classRule = new ClassSetup(connectionRule::getAsyncConnection); + + private static final class ClassSetup extends ExternalResource { + + private final Supplier connectionSupplier; + private final TableName tableName; + private AsyncAdmin admin; + private WebTarget target; + + public ClassSetup(final Supplier connectionSupplier) { + this.connectionSupplier = connectionSupplier; + tableName = TableName.valueOf(TestHbckMetricsResource.class.getSimpleName()); + } + + public WebTarget getTarget() { + return target; + } + + @Override + protected void before() throws Throwable { + final AsyncConnection conn = connectionSupplier.get(); + admin = conn.getAdmin(); + final TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c")).build()) + .setDurability(Durability.SKIP_WAL).build(); + admin.createTable(tableDescriptor).get(); + + HMaster master = miniClusterRule.getTestingUtility().getMiniHBaseCluster().getMaster(); + + HbckChore hbckChore = mock(HbckChore.class); + HbckReport hbckReport = mock(HbckReport.class); + CatalogJanitor catalogJanitorChore = mock(CatalogJanitor.class); + CatalogJanitorReport catalogJanitorReport = mock(CatalogJanitorReport.class); + master.setHbckChoreForTesting(hbckChore); + master.setCatalogJanitorChoreForTesting(catalogJanitorChore); + + // Test data for Mock HBCK Report + ServerName server1 = + ServerName.valueOf(localhost1, Integer.parseInt(port), Integer.parseInt(hostStartCode)); + ServerName server2 = + ServerName.valueOf(localhost2, Integer.parseInt(port), Integer.parseInt(hostStartCode)); + Path hdfsPath1 = new Path(path1); + Path hdfsPath2 = new Path(path2); + + // Orphan on RS Test data + Map mapOfOrphanRegionsOnRS = new HashMap<>(); + mapOfOrphanRegionsOnRS.put(regionId1, server1); + mapOfOrphanRegionsOnRS.put(regionId2, server2); + + // Orphan Region on FS Test Data + Map mapOfOrphanRegionOnFS = new HashMap<>(); + mapOfOrphanRegionOnFS.put(regionId1, hdfsPath1); + mapOfOrphanRegionOnFS.put(regionId2, hdfsPath2); + + // Inconsistent Regions Test Data + Map>> mapOfInconsistentRegions = new HashMap<>(); + mapOfInconsistentRegions.put(regionId1, new Pair<>(server1, Arrays.asList(server1, server2))); + mapOfInconsistentRegions.put(regionId2, new Pair<>(server2, Arrays.asList(server1, server2))); + + // Region Overlap and Region Holes Test Data + List> listOfRegion = new ArrayList<>(); + listOfRegion.add(new Pair<>(FIRST_META_REGIONINFO, FIRST_META_REGIONINFO)); + listOfRegion.add(new Pair<>(FIRST_META_REGIONINFO, FIRST_META_REGIONINFO)); + + // Unknown RegionServer Test Data + List> listOfUnknownServers = new ArrayList<>(); + listOfUnknownServers.add(new Pair<>(FIRST_META_REGIONINFO, server1)); + listOfUnknownServers.add(new Pair<>(FIRST_META_REGIONINFO, server2)); + + // Empty Region Info Test Data + List listOfEmptyRegionInfo = new ArrayList<>(); + listOfEmptyRegionInfo.add(regionId1.getBytes()); + listOfEmptyRegionInfo.add(regionId2.getBytes()); + + // Mock HBCK Report and CatalogJanitor Report + when(hbckReport.getCheckingStartTimestamp()) + .thenReturn(Instant.ofEpochMilli(reportStartTime)); + 
when(hbckReport.getCheckingEndTimestamp()).thenReturn(Instant.ofEpochSecond(reportEndTime)); + when(hbckReport.getOrphanRegionsOnFS()).thenReturn(mapOfOrphanRegionOnFS); + when(hbckReport.getOrphanRegionsOnRS()).thenReturn(mapOfOrphanRegionsOnRS); + when(hbckReport.getInconsistentRegions()).thenReturn(mapOfInconsistentRegions); + when(catalogJanitorReport.getHoles()).thenReturn(listOfRegion); + when(catalogJanitorReport.getOverlaps()).thenReturn(listOfRegion); + when(catalogJanitorReport.getUnknownServers()).thenReturn(listOfUnknownServers); + when(catalogJanitorReport.getEmptyRegionInfo()).thenReturn(listOfEmptyRegionInfo); + + Mockito.doReturn(hbckReport).when(hbckChore).getLastReport(); + Mockito.doReturn(catalogJanitorReport).when(catalogJanitorChore).getLastReport(); + + final String baseUrl = + admin.getMaster().thenApply(ServerName::getHostname).thenCombine(admin.getMasterInfoPort(), + (hostName, infoPort) -> "http://" + hostName + ":" + infoPort).get(); + final Client client = ClientBuilder.newClient(); + target = client.target(baseUrl).path("hbck/hbck-metrics"); + } + + @Override + protected void after() { + final TableName tableName = TableName.valueOf("test"); + try { + admin.tableExists(tableName).thenCompose(val -> { + if (val) { + return admin.disableTable(tableName) + .thenCompose(ignored -> admin.deleteTable(tableName)); + } else { + return CompletableFuture.completedFuture(null); + } + }).get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + @ClassRule + public static RuleChain ruleChain = + RuleChain.outerRule(miniClusterRule).around(connectionRule).around(classRule); + + @Test + public void testGetRoot() { + final String response = classRule.getTarget().request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK JSON Response : " + response); + assertThat(response, + allOf(containsString(hbckReportStartTime), containsString(hbckReportEndTime), + containsString(hbckOrphanRegionOnFS), containsString(hbckOrphanRegionOnRS), + containsString(hbckInconsistentRegion), containsString(hbckHoles), + containsString(hbckOverlaps), containsString(hbckUnknownServers), + containsString(hbckEmptyRegionInfo), containsString(Objects.toString(reportStartTime)), + containsString(Objects.toString(reportEndTime)))); + } + + @Test + public void testGetRootHtml() { + assertThrows(NotAcceptableException.class, () -> classRule.getTarget() + .request(MediaType.TEXT_HTML_TYPE).header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetOrphanRegionOnFS() { + final String response = + classRule.getTarget().path("orphan-regions-on-fs").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource orphan-regions-on-fs : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(regionId), + containsString(regionHdfsPath), containsString(regionId1), containsString(regionId2), + containsString(path1), containsString(path2))); + } + + @Test + public void testGetOrphanRegionOnFSHtml() { + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().path("orphan-regions-on-fs").request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetOrphanRegionOnRS() { + final String response = + 
classRule.getTarget().path("orphan-regions-on-rs").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource orphan-regions-on-rs : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(regionId), + containsString(rsName), containsString(hostName), containsString(hostPort), + containsString(startCode), containsString(regionId1), containsString(regionId2), + containsString(localhost1), containsString(localhost2), containsString(port), + containsString(hostStartCode))); + } + + @Test + public void testGetOrphanRegionOnRSHtml() { + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().path("orphan-regions-on-rs").request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetInconsistentRegions() { + final String response = + classRule.getTarget().path("inconsistent-regions").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource inconsistent-regions : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(hostName), + containsString(hostPort), containsString(startCode), containsString(listOfServers), + containsString(regionId1), containsString(regionId2), containsString(regionId), + containsString(serverNameInMeta), containsString(localhost1), containsString(localhost2), + containsString(port), containsString(hostStartCode))); + } + + @Test + public void testGetInconsistentRegionsHtml() { + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().path("inconsistent-regions").request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetRegionHoles() { + final String response = + classRule.getTarget().path("region-holes").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource region-holes : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(region1Info), + containsString(region2Info), containsString(regionId), containsString(tableName), + containsString(metaRegionID), containsString(metaTableName))); + } + + @Test + public void testGetRegionHolesHtml() { + assertThrows(NotAcceptableException.class, () -> classRule.getTarget().path("region-holes") + .request(MediaType.TEXT_HTML_TYPE).header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetRegionOverlaps() { + final String response = + classRule.getTarget().path("region-overlaps").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource region-overlaps : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(regionId), + containsString(tableName), containsString(region2Info), containsString(region2Info), + containsString(metaRegionID), containsString(metaTableName))); + } + + @Test + public void testGetRegionOverlapsHtml() { + assertThrows(NotAcceptableException.class, () -> classRule.getTarget().path("region-overlaps") + .request(MediaType.TEXT_HTML_TYPE).header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetUnkownServers() { 
+ final String response = + classRule.getTarget().path("unknown-servers").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource unknown-servers : " + response); + assertThat(response, + allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), containsString(regionInfo), + containsString(regionId), containsString(tableName), containsString(serverName), + containsString(serverName), containsString(port), containsString(startCode), + containsString(metaRegionID), containsString(metaTableName), containsString(localhost1), + containsString(localhost2), containsString(port), containsString(startCode))); + } + + @Test + public void testGetUnkownServersHtml() { + assertThrows(NotAcceptableException.class, () -> classRule.getTarget().path("unknown-servers") + .request(MediaType.TEXT_HTML_TYPE).header("X-Jersey-Tracing-Accept", true).get(String.class)); + } + + @Test + public void testGetEmptyRegionInfo() { + final String response = + classRule.getTarget().path("empty-regioninfo").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + LOG.info("HBCK Response for resource empty-regioninfo : " + response); + assertThat(response, allOf(startsWith(dataStartsWith), endsWith(dataEndsWith), + containsString(regionInfo), containsString(regionId1), containsString(regionId2))); + } + + @Test + public void testGetEmptyRegionInfoHtml() { + assertThrows(NotAcceptableException.class, () -> classRule.getTarget().path("empty-regioninfo") + .request(MediaType.TEXT_HTML_TYPE).header("X-Jersey-Tracing-Accept", true).get(String.class)); + } +} From 1a089cd3935d36e51c49e83828115efb7cffbf2a Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Tue, 9 Apr 2024 04:20:47 -0400 Subject: [PATCH 316/514] HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) Co-authored-by: Charles Connell Signed-off-by: Andrew Purtell Signed-off-by: Nick Dimiduk --- .../io/compress/zstd/ZstdCompressor.java | 20 +++++++++++++------ .../io/compress/zstd/ZstdDecompressor.java | 19 +++++++++++------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 4d34d4825d31..b48db9106fb4 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.compress.zstd; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdCompressCtx; import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +40,7 @@ public class ZstdCompressor implements CanReinit, Compressor { protected long bytesRead, bytesWritten; protected int dictId; protected ZstdDictCompress dict; + protected ZstdCompressCtx ctx; ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) { this.level = level; @@ -46,9 +48,12 @@ public class ZstdCompressor implements CanReinit, Compressor { this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); + this.ctx = new ZstdCompressCtx(); + 
this.ctx.setLevel(level); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictCompress(dictionary, level); + this.ctx.loadDict(this.dict); } } @@ -79,12 +84,7 @@ public int compress(final byte[] b, final int off, final int len) throws IOExcep } else { outBuf.clear(); } - int written; - if (dict != null) { - written = Zstd.compress(outBuf, inBuf, dict); - } else { - written = Zstd.compress(outBuf, inBuf, level); - } + int written = ctx.compress(outBuf, inBuf); bytesWritten += written; inBuf.clear(); finished = true; @@ -170,6 +170,14 @@ public void reset() { bytesWritten = 0; finish = false; finished = false; + ctx.reset(); + ctx.setLevel(level); + if (dict != null) { + ctx.loadDict(dict); + } else { + // loadDict((byte[]) accepts null to clear the dictionary + ctx.loadDict((byte[]) null); + } } @Override diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index ef0a0f87651f..79826c96d5e3 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.compress.zstd; -import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDecompressCtx; import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,15 +39,18 @@ public class ZstdDecompressor implements CanReinit, Decompressor { protected boolean finished; protected int dictId; protected ZstdDictDecompress dict; + protected ZstdDecompressCtx ctx; ZstdDecompressor(final int bufferSize, final byte[] dictionary) { this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); + this.ctx = new ZstdDecompressCtx(); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictDecompress(dictionary); + this.ctx.loadDict(this.dict); } } @@ -67,12 +70,7 @@ public int decompress(final byte[] b, final int off, final int len) throws IOExc int remaining = inBuf.remaining(); inLen -= remaining; outBuf.clear(); - int written; - if (dict != null) { - written = Zstd.decompress(outBuf, inBuf, dict); - } else { - written = Zstd.decompress(outBuf, inBuf); - } + int written = ctx.decompress(outBuf, inBuf); inBuf.clear(); outBuf.flip(); int n = Math.min(written, len); @@ -109,6 +107,13 @@ public void reset() { outBuf.clear(); outBuf.position(outBuf.capacity()); finished = false; + ctx.reset(); + if (dict != null) { + ctx.loadDict(dict); + } else { + // loadDict((byte[]) accepts null to clear the dictionary + ctx.loadDict((byte[]) null); + } } @Override From adc79a0a9c2b579915a902f611a66edfddf3149c Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 9 Apr 2024 10:55:39 -0700 Subject: [PATCH 317/514] HBASE-28448 CompressionTest hangs when run over a Ozone ofs path (#5771) This bug was found via HDDS-10564. 
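
The change below wraps the FileSystem handle in try-with-resources so it is
always closed once the smoke test finishes. The hang is consistent with an
unclosed filesystem client (for example, Ozone's ofs client) keeping
non-daemon threads alive after main() returns. A minimal sketch of the
pattern follows; the class name and default path are illustrative only and
not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CloseFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The reported hang was seen with an ofs:// (Ozone) path; a local
        // file:// path exercises the same open/close lifecycle.
        Path path = new Path(args.length > 0 ? args[0] : "file:///tmp/compression-test");
        try (FileSystem fs = path.getFileSystem(conf)) {
          // Run the smoke test against 'path' here. When this block exits,
          // the FileSystem (and its client threads) are closed, so the JVM
          // can terminate instead of hanging.
        }
      }
    }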
--- .../hadoop/hbase/util/CompressionTest.java | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 0870dbe6f9bc..9065ebf116b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -152,17 +152,18 @@ public static void main(String[] args) throws Exception { Configuration conf = new Configuration(); Path path = new Path(args[0]); - FileSystem fs = path.getFileSystem(conf); - if (fs.exists(path)) { - System.err.println("The specified path exists, aborting!"); - System.exit(1); - } + try (FileSystem fs = path.getFileSystem(conf)) { + if (fs.exists(path)) { + System.err.println("The specified path exists, aborting!"); + System.exit(1); + } - try { - doSmokeTest(fs, path, args[1]); - } finally { - fs.delete(path, false); + try { + doSmokeTest(fs, path, args[1]); + } finally { + fs.delete(path, false); + } + System.out.println("SUCCESS"); } - System.out.println("SUCCESS"); } } From 5d694dae5e461517ea89356b8ea124c406ac002f Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 10 Apr 2024 10:55:14 -0700 Subject: [PATCH 318/514] HBASE-28506 Remove hbase-compression-xz (#5811) Signed-off-by: Bryan Beaudreault Signed-off-by: Duo Zhang --- hbase-assembly/pom.xml | 4 - .../hbase-compression-xz/pom.xml | 166 ------------- .../hbase/io/compress/xz/LzmaCodec.java | 126 ---------- .../hbase/io/compress/xz/LzmaCompressor.java | 223 ------------------ .../io/compress/xz/LzmaDecompressor.java | 151 ------------ .../compress/xz/TestHFileCompressionLzma.java | 76 ------ .../hbase/io/compress/xz/TestLzmaCodec.java | 58 ----- .../compress/xz/TestWALCompressionLzma.java | 56 ----- hbase-compression/pom.xml | 1 - pom.xml | 11 - 10 files changed, 872 deletions(-) delete mode 100644 hbase-compression/hbase-compression-xz/pom.xml delete mode 100644 hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java delete mode 100644 hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java delete mode 100644 hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java delete mode 100644 hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java delete mode 100644 hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java delete mode 100644 hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 44f8d0a198ff..13142036f077 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -184,10 +184,6 @@ org.apache.hbase hbase-compression-snappy
- - org.apache.hbase - hbase-compression-xz - org.apache.hbase hbase-compression-zstd diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml deleted file mode 100644 index b34cc1bf5928..000000000000 --- a/hbase-compression/hbase-compression-xz/pom.xml +++ /dev/null @@ -1,166 +0,0 @@ - - - - 4.0.0 - - org.apache.hbase - hbase-compression - ${revision} - ../pom.xml - - hbase-compression-xz - Apache HBase - Compression - XZ - Pure Java compression support using XZ for Java - - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-logging - test-jar - test - - - org.apache.hbase - hbase-common - test-jar - test - - - org.apache.hbase - hbase-testing-util - test - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.slf4j - slf4j-api - - - com.github.stephenc.findbugs - findbugs-annotations - compile - true - - - - org.tukaani - xz - - - - org.slf4j - jcl-over-slf4j - test - - - org.slf4j - jul-to-slf4j - test - - - org.apache.logging.log4j - log4j-api - test - - - org.apache.logging.log4j - log4j-core - test - - - org.apache.logging.log4j - log4j-slf4j-impl - test - - - org.apache.logging.log4j - log4j-1.2-api - test - - - org.hamcrest - hamcrest-library - test - - - org.mockito - mockito-core - test - - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - build-with-jdk11 - - [1.11,) - - - - javax.annotation - javax.annotation-api - - - - - diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java deleted file mode 100644 index a5d583d770c0..000000000000 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.compress.xz; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; -import org.apache.hadoop.io.compress.BlockCompressorStream; -import org.apache.hadoop.io.compress.BlockDecompressorStream; -import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.io.compress.CompressionInputStream; -import org.apache.hadoop.io.compress.CompressionOutputStream; -import org.apache.hadoop.io.compress.Compressor; -import org.apache.hadoop.io.compress.Decompressor; -import org.apache.yetus.audience.InterfaceAudience; - -/** - * Hadoop lzma codec implemented with XZ for Java. - */ -@InterfaceAudience.Private -public class LzmaCodec implements Configurable, CompressionCodec { - - public static final String LZMA_LEVEL_KEY = "hbase.io.compress.lzma.level"; - public static final int LZMA_LEVEL_DEFAULT = 6; - public static final String LZMA_BUFFERSIZE_KEY = "hbase.io.compress.lzma.buffersize"; - public static final int LZMA_BUFFERSIZE_DEFAULT = 256 * 1024; - - private Configuration conf; - private int bufferSize; - private int level; - - public LzmaCodec() { - conf = new Configuration(); - bufferSize = getBufferSize(conf); - level = getLevel(conf); - } - - @Override - public Configuration getConf() { - return conf; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - this.bufferSize = getBufferSize(conf); - this.level = getLevel(conf); - } - - @Override - public Compressor createCompressor() { - return new LzmaCompressor(level, bufferSize); - } - - @Override - public Decompressor createDecompressor() { - return new LzmaDecompressor(bufferSize); - } - - @Override - public CompressionInputStream createInputStream(InputStream in) throws IOException { - return createInputStream(in, createDecompressor()); - } - - @Override - public CompressionInputStream createInputStream(InputStream in, Decompressor d) - throws IOException { - return new BlockDecompressorStream(in, d, bufferSize); - } - - @Override - public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { - return createOutputStream(out, createCompressor()); - } - - @Override - public CompressionOutputStream createOutputStream(OutputStream out, Compressor c) - throws IOException { - return new BlockCompressorStream(out, c, bufferSize, - CompressionUtil.compressionOverhead(bufferSize)); - } - - @Override - public Class getCompressorType() { - return LzmaCompressor.class; - } - - @Override - public Class getDecompressorType() { - return LzmaDecompressor.class; - } - - @Override - public String getDefaultExtension() { - return ".lzma"; - } - - // Package private - - static int getLevel(Configuration conf) { - return conf.getInt(LZMA_LEVEL_KEY, LZMA_LEVEL_DEFAULT); - } - - static int getBufferSize(Configuration conf) { - return conf.getInt(LZMA_BUFFERSIZE_KEY, LZMA_BUFFERSIZE_DEFAULT); - } - -} diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java deleted file mode 100644 index 88d0f0d0b1cf..000000000000 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.compress.xz; - -import java.io.IOException; -import java.nio.BufferOverflowException; -import java.nio.ByteBuffer; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.ByteBufferOutputStream; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; -import org.apache.hadoop.io.compress.Compressor; -import org.apache.yetus.audience.InterfaceAudience; -import org.tukaani.xz.ArrayCache; -import org.tukaani.xz.BasicArrayCache; -import org.tukaani.xz.LZMA2Options; -import org.tukaani.xz.LZMAOutputStream; -import org.tukaani.xz.UnsupportedOptionsException; - -/** - * Hadoop compressor glue for XZ for Java. - */ -@InterfaceAudience.Private -public class LzmaCompressor implements Compressor { - - protected static final ArrayCache ARRAY_CACHE = new BasicArrayCache(); - protected ByteBuffer inBuf; - protected ByteBuffer outBuf; - protected int bufferSize; - protected boolean finish, finished; - protected long bytesRead, bytesWritten; - protected LZMA2Options lzOptions; - - LzmaCompressor(int level, int bufferSize) { - this.bufferSize = bufferSize; - this.inBuf = ByteBuffer.allocate(bufferSize); - this.outBuf = ByteBuffer.allocate(bufferSize); - this.outBuf.position(bufferSize); - this.lzOptions = new LZMA2Options(); - try { - this.lzOptions.setPreset(level); - } catch (UnsupportedOptionsException e) { - throw new RuntimeException(e); - } - } - - @Override - public int compress(byte[] b, int off, int len) throws IOException { - // If we have previously compressed our input and still have some buffered bytes - // remaining, provide them to the caller. - if (outBuf.hasRemaining()) { - int remaining = outBuf.remaining(), n = Math.min(remaining, len); - outBuf.get(b, off, n); - return n; - } - // We don't actually begin compression until our caller calls finish(). - if (finish) { - if (inBuf.position() > 0) { - inBuf.flip(); - int uncompressed = inBuf.remaining(); - // If we don't have enough capacity in our currently allocated output buffer, - // allocate a new one which does. - int needed = maxCompressedLength(uncompressed); - // Can we decompress directly into the provided array? - ByteBuffer writeBuffer; - boolean direct = false; - if (len <= needed) { - writeBuffer = ByteBuffer.wrap(b, off, len); - direct = true; - } else { - if (outBuf.capacity() < needed) { - needed = CompressionUtil.roundInt2(needed); - outBuf = ByteBuffer.allocate(needed); - } else { - outBuf.clear(); - } - writeBuffer = outBuf; - } - int oldPos = writeBuffer.position(); - // This is pretty ugly. I don't see how to do it better. Stream to byte buffers back to - // stream back to byte buffers... if only XZ for Java had a public block compression - // API. It does not. 
Fortunately the algorithm is so slow, especially at higher levels, - // that inefficiencies here may not matter. - try (ByteBufferOutputStream lowerOut = new ByteBufferOutputStream(writeBuffer) { - @Override - // ByteBufferOutputStream will reallocate the output buffer if it is too small. We - // do not want that behavior here. - protected void checkSizeAndGrow(int extra) { - long capacityNeeded = curBuf.position() + (long) extra; - if (capacityNeeded > curBuf.limit()) { - throw new BufferOverflowException(); - } - } - }) { - try (LZMAOutputStream out = - new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) { - out.write(inBuf.array(), inBuf.arrayOffset(), uncompressed); - } - } - int written = writeBuffer.position() - oldPos; - bytesWritten += written; - inBuf.clear(); - finished = true; - outBuf.flip(); - if (!direct) { - int n = Math.min(written, len); - outBuf.get(b, off, n); - return n; - } else { - return written; - } - } else { - finished = true; - } - } - return 0; - } - - @Override - public void end() { - } - - @Override - public void finish() { - finish = true; - } - - @Override - public boolean finished() { - return finished && !outBuf.hasRemaining(); - } - - @Override - public long getBytesRead() { - return bytesRead; - } - - @Override - public long getBytesWritten() { - return bytesWritten; - } - - @Override - public boolean needsInput() { - return !finished(); - } - - @Override - public void reinit(Configuration conf) { - if (conf != null) { - // Level might have changed - try { - int level = LzmaCodec.getLevel(conf); - this.lzOptions = new LZMA2Options(); - this.lzOptions.setPreset(level); - } catch (UnsupportedOptionsException e) { - throw new RuntimeException(e); - } - // Buffer size might have changed - int newBufferSize = LzmaCodec.getBufferSize(conf); - if (bufferSize != newBufferSize) { - bufferSize = newBufferSize; - this.inBuf = ByteBuffer.allocate(bufferSize); - this.outBuf = ByteBuffer.allocate(bufferSize); - } - } - reset(); - } - - @Override - public void reset() { - inBuf.clear(); - outBuf.clear(); - outBuf.position(outBuf.capacity()); - bytesRead = 0; - bytesWritten = 0; - finish = false; - finished = false; - } - - @Override - public void setDictionary(byte[] b, int off, int len) { - throw new UnsupportedOperationException("setDictionary is not supported"); - } - - @Override - public void setInput(byte[] b, int off, int len) { - if (inBuf.remaining() < len) { - // Get a new buffer that can accomodate the accumulated input plus the additional - // input that would cause a buffer overflow without reallocation. - // This condition should be fortunately rare, because it is expensive. 
- int needed = CompressionUtil.roundInt2(inBuf.capacity() + len); - ByteBuffer newBuf = ByteBuffer.allocate(needed); - inBuf.flip(); - newBuf.put(inBuf); - inBuf = newBuf; - } - inBuf.put(b, off, len); - bytesRead += len; - finished = false; - } - - // Package private - - int maxCompressedLength(int len) { - return len + CompressionUtil.compressionOverhead(len); - } - -} diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java deleted file mode 100644 index b1d065485b5d..000000000000 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.compress.xz; - -import java.io.IOException; -import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.io.ByteBufferInputStream; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; -import org.apache.hadoop.io.compress.Decompressor; -import org.apache.yetus.audience.InterfaceAudience; -import org.tukaani.xz.ArrayCache; -import org.tukaani.xz.BasicArrayCache; -import org.tukaani.xz.LZMAInputStream; - -/** - * Hadoop decompressor glue for XZ for Java. - */ -@InterfaceAudience.Private -public class LzmaDecompressor implements Decompressor { - - protected static final ArrayCache ARRAY_CACHE = new BasicArrayCache() { - @Override - public byte[] getByteArray(int size, boolean fillWithZeros) { - // Work around a bug in XZ decompression if cached byte arrays are not cleared by - // always clearing them. - return super.getByteArray(size, true); - } - }; - protected ByteBuffer inBuf, outBuf; - protected int inLen; - protected boolean finished; - - LzmaDecompressor(int bufferSize) { - this.inBuf = ByteBuffer.allocate(bufferSize); - this.outBuf = ByteBuffer.allocate(bufferSize); - this.outBuf.position(bufferSize); - } - - @Override - public int decompress(byte[] b, int off, int len) throws IOException { - if (outBuf.hasRemaining()) { - int remaining = outBuf.remaining(), n = Math.min(remaining, len); - outBuf.get(b, off, n); - return n; - } - if (inBuf.position() > 0) { - inBuf.flip(); - int remaining = inBuf.remaining(); - inLen -= remaining; - // This is pretty ugly. I don't see how to do it better. Stream to byte buffers back to - // stream back to byte buffers... if only XZ for Java had a public block compression API. - // It does not. LZMA decompression speed is reasonably good, so inefficiency here is a - // shame. 
- // Perhaps we could look at using reflection to make package protected classes for block - // compression in XZ for Java accessible here, that library can be expected to rarely - // change, if at all. - outBuf.clear(); - try (ByteBufferInputStream lowerIn = new ByteBufferInputStream(inBuf)) { - final byte[] buf = new byte[8192]; - try (LZMAInputStream in = new LZMAInputStream(lowerIn, ARRAY_CACHE)) { - int read; - do { - read = in.read(buf); - if (read > 0) { - outBuf.put(buf, 0, read); - } - } while (read > 0); - } - } - int written = outBuf.position(); - outBuf.flip(); - inBuf.clear(); - int n = Math.min(written, len); - outBuf.get(b, off, n); - return n; - } - finished = true; - return 0; - } - - @Override - public void end() { - } - - @Override - public boolean finished() { - return finished; - } - - @Override - public int getRemaining() { - return inLen; - } - - @Override - public boolean needsDictionary() { - return false; - } - - @Override - public void reset() { - inBuf.clear(); - inLen = 0; - outBuf.clear(); - outBuf.position(outBuf.capacity()); - finished = false; - } - - @Override - public boolean needsInput() { - return inBuf.position() == 0; - } - - @Override - public void setDictionary(byte[] b, int off, int len) { - throw new UnsupportedOperationException("setDictionary is not supported"); - } - - @Override - public void setInput(byte[] b, int off, int len) { - if (inBuf.remaining() < len) { - // Get a new buffer that can accomodate the accumulated input plus the additional - // input that would cause a buffer overflow without reallocation. - // This condition should be fortunately rare, because it is expensive. - int needed = CompressionUtil.roundInt2(inBuf.capacity() + len); - ByteBuffer newBuf = ByteBuffer.allocate(needed); - inBuf.flip(); - newBuf.put(inBuf); - inBuf = newBuf; - } - inBuf.put(b, off, len); - inLen += len; - finished = false; - } - -} diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java deleted file mode 100644 index 734740635084..000000000000 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.compress.xz; - -import static org.junit.Assert.assertTrue; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.compress.HFileTestBase; -import org.apache.hadoop.hbase.testclassification.IOTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ IOTests.class, SmallTests.class }) -public class TestHFileCompressionLzma extends HFileTestBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); - - private static Configuration conf; - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - conf = TEST_UTIL.getConfiguration(); - conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName()); - Compression.Algorithm.LZMA.reload(conf); - HFileTestBase.setUpBeforeClass(); - } - - @Test - public void test() throws Exception { - Path path = - new Path(TEST_UTIL.getDataTestDir(), HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); - doTest(conf, path, Compression.Algorithm.LZMA); - } - - @Test - public void testReconfLevels() throws Exception { - Path path_1 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); - Path path_2 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); - conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1); - doTest(conf, path_1, Compression.Algorithm.LZMA); - long len_1 = FS.getFileStatus(path_1).getLen(); - conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 9); - doTest(conf, path_2, Compression.Algorithm.LZMA); - long len_2 = FS.getFileStatus(path_2).getLen(); - LOG.info("Level 1 len {}", len_1); - LOG.info("Level 9 len {}", len_2); - assertTrue("Reconfiguraton with LZMA_LEVEL_KEY did not seem to work", len_1 > len_2); - } - -} diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java deleted file mode 100644 index e5320da16777..000000000000 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.compress.xz; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.io.compress.CompressionTestBase; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -public class TestLzmaCodec extends CompressionTestBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLzmaCodec.class); - - @Test - public void testLzmaCodecSmall() throws Exception { - codecSmallTest(new LzmaCodec()); - } - - @Test - public void testLzmaCodecLarge() throws Exception { - codecLargeTest(new LzmaCodec(), 1.1); // poor compressability - codecLargeTest(new LzmaCodec(), 2); - codecLargeTest(new LzmaCodec(), 10); // very high compressability - } - - @Test - public void testLzmaCodecVeryLarge() throws Exception { - Configuration conf = new Configuration(); - // LZMA levels range from 1 to 9. - // Level 9 might take several minutes to complete. 3 is our default. 1 will be fast. - conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1); - LzmaCodec codec = new LzmaCodec(); - codec.setConf(conf); - codecVeryLargeTest(codec, 3); // like text - } - -} diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java deleted file mode 100644 index aa74926cb819..000000000000 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.compress.xz; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.wal.CompressedWALTestBase; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; - -@Category({ RegionServerTests.class, MediumTests.class }) -public class TestWALCompressionLzma extends CompressedWALTestBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALCompressionLzma.class); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName()); - Compression.Algorithm.LZMA.reload(conf); - conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); - conf.setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); - conf.set(CompressionContext.WAL_VALUE_COMPRESSION_TYPE, Compression.Algorithm.LZMA.getName()); - TEST_UTIL.startMiniDFSCluster(3); - } - - @AfterClass - public static void tearDown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - -} diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index 4b99b3dd34b9..c2e4633b3987 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -36,7 +36,6 @@ hbase-compression-brotli hbase-compression-lz4 hbase-compression-snappy - hbase-compression-xz hbase-compression-zstd diff --git a/pom.xml b/pom.xml index 6cecebd50af9..fc76a7ff0f56 100644 --- a/pom.xml +++ b/pom.xml @@ -917,7 +917,6 @@ 1.11.0 1.8.0 1.1.10.4 - 1.9 1.5.5-2 - 3.25.2 + 4.26.1 diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 39ecccc0015d..2c73844b7c02 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -34,7 +34,7 @@ - 3.25.2 + 4.26.1 3.10.6.Final - 4.1.100.Final + 4.1.108.Final 0.13.0 - 2.16.1 - 2.16.1 + 2.17.0 + 2.17.0 2.3.1 3.1.0 2.1.1 @@ -924,7 +924,7 @@ databind] must be kept in sync with the version of jackson-jaxrs-json-provider shipped in hbase-thirdparty. 
--> - 4.1.6 + 4.1.7 0.8.8 From aaeef2db6accaf6d5301400bbf79e28c020469db Mon Sep 17 00:00:00 2001 From: chaijunjie0101 <64140218+chaijunjie0101@users.noreply.github.com> Date: Sun, 21 Apr 2024 19:39:32 +0800 Subject: [PATCH 328/514] HBASE-28215 CreateTableProcedure and DeleteTableProcedure should sleep a while before retrying (#5502) Signed-off-by: Duo Zhang --- .../procedure/CreateTableProcedure.java | 24 ++++- .../procedure/DeleteTableProcedure.java | 24 ++++- ...BadMasterObserverForCreateDeleteTable.java | 55 ++++++++++++ ...stCreateDeleteTableProcedureWithRetry.java | 88 +++++++++++++++++++ .../procedure/TestCreateTableProcedure.java | 4 +- 5 files changed, 190 insertions(+), 5 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 17998fec7bd7..23ad3b42aef0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerValidationUtils; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,6 +54,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class CreateTableProcedure extends AbstractStateMachineTableProcedure { @@ -60,6 +64,7 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure newRegions; + private RetryCounter retryCounter; public CreateTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -80,7 +85,7 @@ public CreateTableProcedure(final MasterProcedureEnv env, final TableDescriptor @Override protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state) - throws InterruptedException { + throws InterruptedException, ProcedureSuspendedException { LOG.info("{} execute state={}", this, state); try { switch (state) { @@ -131,6 +136,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS break; case CREATE_TABLE_POST_OPERATION: postCreate(env); + retryCounter = null; return 
Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); @@ -139,12 +145,26 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS if (isRollbackSupported(state)) { setFailure("master-create-table", e); } else { - LOG.warn("Retriable error trying to create table=" + getTableName() + " state=" + state, e); + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("Retriable error trying to create table={},state={},suspend {}secs.", + getTableName(), state, backoff / 1000, e); + throw suspend(Math.toIntExact(backoff), true); } } + retryCounter = null; return Flow.HAS_MORE_STATE; } + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + @Override protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 80fb5d0534d4..8c2f1067c952 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -41,9 +41,12 @@ import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,6 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class DeleteTableProcedure extends AbstractStateMachineTableProcedure { @@ -61,6 +65,7 @@ public class DeleteTableProcedure extends AbstractStateMachineTableProcedure regions; private TableName tableName; + private RetryCounter retryCounter; public DeleteTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -79,7 +84,7 @@ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableN @Override protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) - throws InterruptedException { + throws InterruptedException, ProcedureSuspendedException { if (LOG.isTraceEnabled()) { LOG.trace(this + " execute state=" + state); } @@ -124,6 +129,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s break; case DELETE_TABLE_POST_OPERATION: postDelete(env); + retryCounter = null; LOG.debug("Finished {}", this); return Flow.NO_MORE_STATE; default: @@ -133,12 +139,26 @@ 
protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s if (isRollbackSupported(state)) { setFailure("master-delete-table", e); } else { - LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e); + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("Retriable error trying to delete table={},state={},suspend {}secs.", + getTableName(), state, backoff / 1000, e); + throw suspend(Math.toIntExact(backoff), true); } } + retryCounter = null; return Flow.HAS_MORE_STATE; } + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + @Override protected boolean abort(MasterProcedureEnv env) { // TODO: Current behavior is: with no rollback and no abort support, procedure may get stuck diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java new file mode 100644 index 000000000000..454a24e198aa --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.coprocessor; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; + +/** + * A bad Master Observer to prevent user to create/delete table once. 
+ */ +public class BadMasterObserverForCreateDeleteTable implements MasterObserver, MasterCoprocessor { + private boolean createFailedOnce = false; + private boolean deleteFailedOnce = false; + + @Override + public void postCompletedCreateTableAction(ObserverContext ctx, + TableDescriptor desc, RegionInfo[] regions) throws IOException { + if (!createFailedOnce && !desc.getTableName().isSystemTable()) { + createFailedOnce = true; + throw new IOException("execute postCompletedCreateTableAction failed once."); + } + } + + @Override + public void postCompletedDeleteTableAction(ObserverContext ctx, + TableName tableName) throws IOException { + if (!deleteFailedOnce && !tableName.isSystemTable()) { + deleteFailedOnce = true; + throw new IOException("execute postCompletedDeleteTableAction failed once."); + } + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java new file mode 100644 index 000000000000..3491aa639844 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.BadMasterObserverForCreateDeleteTable; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestCreateDeleteTableProcedureWithRetry { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCreateDeleteTableProcedureWithRetry.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static final TableName TABLE_NAME = + TableName.valueOf(TestCreateDeleteTableProcedureWithRetry.class.getSimpleName()); + + private static final String CF = "cf"; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + BadMasterObserverForCreateDeleteTable.class.getName()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testCreateDeleteTableRetry() throws IOException { + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TABLE_NAME, CF); + RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); + CreateTableProcedure createProc = + new CreateTableProcedure(procExec.getEnvironment(), htd, regions); + ProcedureTestingUtility.submitAndWait(procExec, createProc); + Assert.assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME)); + MasterProcedureTestingUtility.validateTableCreation(UTIL.getMiniHBaseCluster().getMaster(), + TABLE_NAME, regions, CF); + + UTIL.getAdmin().disableTable(TABLE_NAME); + DeleteTableProcedure deleteProc = + new DeleteTableProcedure(procExec.getEnvironment(), TABLE_NAME); + ProcedureTestingUtility.submitAndWait(procExec, deleteProc); + Assert.assertFalse(UTIL.getAdmin().tableExists(TABLE_NAME)); + MasterProcedureTestingUtility.validateTableDeletion(UTIL.getMiniHBaseCluster().getMaster(), + TABLE_NAME); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index bed41f4da86c..0bb54cc190e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterFileSystem; import 
org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; @@ -244,7 +245,8 @@ public CreateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.CreateTableState state) throws InterruptedException { + MasterProcedureProtos.CreateTableState state) + throws InterruptedException, ProcedureSuspendedException { if ( !failOnce && state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT From 8b5ccda02f80037f32a43b5423d2e82edfeaa409 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 21 Apr 2024 19:40:07 +0800 Subject: [PATCH 329/514] HASE-28414 create-release should spotless:apply after making any file changes (#5824) Signed-off-by: Bryan Beaudreault --- dev-support/create-release/release-build.sh | 6 ++++-- dev-support/create-release/release-util.sh | 11 +++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index f3d8798be462..6cc855c97259 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -146,7 +146,8 @@ if [[ "$1" == "tag" ]]; then # Create release version maven_set_version "$RELEASE_VERSION" - find . -name pom.xml -exec git add {} \; + maven_spotless_apply + git_add_poms # Always put CHANGES.md and RELEASENOTES.md to parent directory, so later we do not need to # check their position when generating release data. We can not put them under the source code # directory because for 3.x+, CHANGES.md and RELEASENOTES.md are not tracked so later when @@ -168,7 +169,8 @@ if [[ "$1" == "tag" ]]; then # Create next version maven_set_version "$NEXT_VERSION" - find . -name pom.xml -exec git add {} \; + maven_spotless_apply + git_add_poms git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 3a1b38644f85..a33319fd3614 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -871,3 +871,14 @@ function get_hadoop3_version() { echo "${version}-hadoop3" fi } + +# Run mvn spotless:apply to format the code base +# For 2.x, the generated CHANGES.md and RELEASENOTES.md may have lines end with whitespace and +# case spotless:check failure, so we should run spotless:apply before committing +function maven_spotless_apply() { + "${MVN[@]}" spotless:apply +} + +function git_add_poms() { + find . 
-name pom.xml -exec git add {} \; +} From c73f8b51cba518984c48f8ead64b9cde395c54c3 Mon Sep 17 00:00:00 2001 From: chandrasekhar-188k <154109917+chandrasekhar-188k@users.noreply.github.com> Date: Sun, 21 Apr 2024 17:15:06 +0530 Subject: [PATCH 330/514] HBASE-28497 Missing fields in Get.toJSON (#5800) Signed-off-by: Duo Zhang Signed-off-by: Pankaj Kumar --- .../org/apache/hadoop/hbase/client/Get.java | 21 ++++++ .../hadoop/hbase/client/TestOperation.java | 72 +++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 617f67b9a87a..6cf036d48427 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -27,6 +27,7 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -430,6 +431,26 @@ public Map toMap(int maxCols) { if (getId() != null) { map.put("id", getId()); } + map.put("storeLimit", this.storeLimit); + map.put("storeOffset", this.storeOffset); + map.put("checkExistenceOnly", this.checkExistenceOnly); + + map.put("targetReplicaId", this.targetReplicaId); + map.put("consistency", this.consistency); + map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand); + if (!colFamTimeRangeMap.isEmpty()) { + Map> colFamTimeRangeMapStr = colFamTimeRangeMap.entrySet().stream() + .collect(Collectors.toMap((e) -> Bytes.toStringBinary(e.getKey()), e -> { + TimeRange value = e.getValue(); + List rangeList = new ArrayList<>(); + rangeList.add(value.getMin()); + rangeList.add(value.getMax()); + return rangeList; + })); + + map.put("colFamTimeRangeMap", colFamTimeRangeMapStr); + } + map.put("priority", getPriority()); return map; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 0bbe8399914e..9ac9a6c3ab91 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -504,4 +504,76 @@ public void testOperationSubClassMethodsAreBuilderStyle() { BuilderStyleTest.assertClassesAreBuilderStyle(classes); } + + /** + * Test the client Get Operations' JSON encoding to ensure that produced JSON is parseable and + * that the details are present and not corrupted. 
+ * @throws IOException if the JSON conversion fails + */ + @Test + public void testGetOperationToJSON() throws IOException { + // produce a Scan Operation + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(5); + get.setMaxResultsPerColumnFamily(3); + get.setRowOffsetPerColumnFamily(8); + get.setCacheBlocks(true); + get.setMaxResultsPerColumnFamily(5); + get.setRowOffsetPerColumnFamily(9); + get.setCheckExistenceOnly(true); + get.setTimeRange(1000, 2000); + get.setFilter(SCV_FILTER); + get.setReplicaId(1); + get.setConsistency(Consistency.STRONG); + get.setLoadColumnFamiliesOnDemand(true); + get.setColumnFamilyTimeRange(FAMILY, 2000, 3000); + get.setPriority(10); + + // get its JSON representation, and parse it + String json = get.toJSON(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); + Gson gson = new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING) + .setObjectToNumberStrategy(ToNumberPolicy.LONG_OR_DOUBLE).create(); + Map parsedJSON = gson.fromJson(json, typeOfHashMap); + // check for the row + assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); + // check for the family and the qualifier. + List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Get.toJSON()", familyInfo); + assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); + assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); + + assertEquals("maxVersions incorrect in Get.toJSON()", 5L, parsedJSON.get("maxVersions")); + + assertEquals("storeLimit incorrect in Get.toJSON()", 5L, parsedJSON.get("storeLimit")); + assertEquals("storeOffset incorrect in Get.toJSON()", 9L, parsedJSON.get("storeOffset")); + + assertEquals("cacheBlocks incorrect in Get.toJSON()", true, parsedJSON.get("cacheBlocks")); + + List trList = (List) parsedJSON.get("timeRange"); + assertEquals("timeRange incorrect in Get.toJSON()", 2, trList.size()); + assertEquals("timeRange incorrect in Get.toJSON()", "1000", trList.get(0)); + assertEquals("timeRange incorrect in Get.toJSON()", "2000", trList.get(1)); + + Map colFamTimeRange = (Map) parsedJSON.get("colFamTimeRangeMap"); + assertEquals("colFamTimeRangeMap incorrect in Get.toJSON()", 1L, colFamTimeRange.size()); + List testFamily = (List) colFamTimeRange.get("testFamily"); + assertEquals("colFamTimeRangeMap incorrect in Get.toJSON()", 2L, testFamily.size()); + assertEquals("colFamTimeRangeMap incorrect in Get.toJSON()", "2000", testFamily.get(0)); + assertEquals("colFamTimeRangeMap incorrect in Get.toJSON()", "3000", testFamily.get(1)); + + assertEquals("targetReplicaId incorrect in Get.toJSON()", 1L, + parsedJSON.get("targetReplicaId")); + assertEquals("consistency incorrect in Get.toJSON()", "STRONG", parsedJSON.get("consistency")); + assertEquals("loadColumnFamiliesOnDemand incorrect in Get.toJSON()", true, + parsedJSON.get("loadColumnFamiliesOnDemand")); + + assertEquals("priority incorrect in Get.toJSON()", 10L, parsedJSON.get("priority")); + assertEquals("checkExistenceOnly incorrect in Get.toJSON()", true, + parsedJSON.get("checkExistenceOnly")); + + } } From 8936a19b933d98a7a9750e1f28c8bddd0095e358 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 21 Apr 2024 21:09:43 +0800 Subject: [PATCH 331/514] Revert "HBASE-28215 CreateTableProcedure and DeleteTableProcedure should sleep a while before retrying (#5502)" This reverts commit 
aaeef2db6accaf6d5301400bbf79e28c020469db. --- .../procedure/CreateTableProcedure.java | 24 +---- .../procedure/DeleteTableProcedure.java | 24 +---- ...BadMasterObserverForCreateDeleteTable.java | 55 ------------ ...stCreateDeleteTableProcedureWithRetry.java | 88 ------------------- .../procedure/TestCreateTableProcedure.java | 4 +- 5 files changed, 5 insertions(+), 190 deletions(-) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 23ad3b42aef0..17998fec7bd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -35,15 +35,12 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; -import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; -import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerValidationUtils; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.ModifyRegionUtils; -import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +51,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class CreateTableProcedure extends AbstractStateMachineTableProcedure { @@ -64,7 +60,6 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure newRegions; - private RetryCounter retryCounter; public CreateTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -85,7 +80,7 @@ public CreateTableProcedure(final MasterProcedureEnv env, final TableDescriptor @Override protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state) - throws InterruptedException, ProcedureSuspendedException { + throws InterruptedException { LOG.info("{} execute state={}", this, state); try { switch (state) { @@ -136,7 +131,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS break; case CREATE_TABLE_POST_OPERATION: postCreate(env); - retryCounter = null; return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); @@ -145,26 +139,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS if (isRollbackSupported(state)) { setFailure("master-create-table", e); } else { - if (retryCounter == null) { - retryCounter = 
ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); - } - long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Retriable error trying to create table={},state={},suspend {}secs.", - getTableName(), state, backoff / 1000, e); - throw suspend(Math.toIntExact(backoff), true); + LOG.warn("Retriable error trying to create table=" + getTableName() + " state=" + state, e); } } - retryCounter = null; return Flow.HAS_MORE_STATE; } - @Override - protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { - setState(ProcedureProtos.ProcedureState.RUNNABLE); - env.getProcedureScheduler().addFront(this); - return false; - } - @Override protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 8c2f1067c952..80fb5d0534d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -41,12 +41,9 @@ import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; -import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; -import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +54,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class DeleteTableProcedure extends AbstractStateMachineTableProcedure { @@ -65,7 +61,6 @@ public class DeleteTableProcedure extends AbstractStateMachineTableProcedure regions; private TableName tableName; - private RetryCounter retryCounter; public DeleteTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -84,7 +79,7 @@ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableN @Override protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) - throws InterruptedException, ProcedureSuspendedException { + throws InterruptedException { if (LOG.isTraceEnabled()) { LOG.trace(this + " execute state=" + state); } @@ -129,7 +124,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s break; case DELETE_TABLE_POST_OPERATION: postDelete(env); - retryCounter = null; LOG.debug("Finished {}", this); return Flow.NO_MORE_STATE; default: @@ -139,26 +133,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s if (isRollbackSupported(state)) { setFailure("master-delete-table", e); } else { - if (retryCounter == null) { - retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); - } - long backoff = 
retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Retriable error trying to delete table={},state={},suspend {}secs.", - getTableName(), state, backoff / 1000, e); - throw suspend(Math.toIntExact(backoff), true); + LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e); } } - retryCounter = null; return Flow.HAS_MORE_STATE; } - @Override - protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { - setState(ProcedureProtos.ProcedureState.RUNNABLE); - env.getProcedureScheduler().addFront(this); - return false; - } - @Override protected boolean abort(MasterProcedureEnv env) { // TODO: Current behavior is: with no rollback and no abort support, procedure may get stuck diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java deleted file mode 100644 index 454a24e198aa..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.coprocessor; - -import java.io.IOException; -import java.util.Optional; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.TableDescriptor; - -/** - * A bad Master Observer to prevent user to create/delete table once. 
- */ -public class BadMasterObserverForCreateDeleteTable implements MasterObserver, MasterCoprocessor { - private boolean createFailedOnce = false; - private boolean deleteFailedOnce = false; - - @Override - public void postCompletedCreateTableAction(ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException { - if (!createFailedOnce && !desc.getTableName().isSystemTable()) { - createFailedOnce = true; - throw new IOException("execute postCompletedCreateTableAction failed once."); - } - } - - @Override - public void postCompletedDeleteTableAction(ObserverContext ctx, - TableName tableName) throws IOException { - if (!deleteFailedOnce && !tableName.isSystemTable()) { - deleteFailedOnce = true; - throw new IOException("execute postCompletedDeleteTableAction failed once."); - } - } - - @Override - public Optional getMasterObserver() { - return Optional.of(this); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java deleted file mode 100644 index 3491aa639844..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master.procedure; - -import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.coprocessor.BadMasterObserverForCreateDeleteTable; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.ModifyRegionUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ MasterTests.class, MediumTests.class }) -public class TestCreateDeleteTableProcedureWithRetry { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCreateDeleteTableProcedureWithRetry.class); - - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - private static final TableName TABLE_NAME = - TableName.valueOf(TestCreateDeleteTableProcedureWithRetry.class.getSimpleName()); - - private static final String CF = "cf"; - - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = UTIL.getConfiguration(); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - BadMasterObserverForCreateDeleteTable.class.getName()); - UTIL.startMiniCluster(1); - } - - @AfterClass - public static void tearDown() throws Exception { - UTIL.shutdownMiniCluster(); - } - - @Test - public void testCreateDeleteTableRetry() throws IOException { - ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); - TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TABLE_NAME, CF); - RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); - CreateTableProcedure createProc = - new CreateTableProcedure(procExec.getEnvironment(), htd, regions); - ProcedureTestingUtility.submitAndWait(procExec, createProc); - Assert.assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME)); - MasterProcedureTestingUtility.validateTableCreation(UTIL.getMiniHBaseCluster().getMaster(), - TABLE_NAME, regions, CF); - - UTIL.getAdmin().disableTable(TABLE_NAME); - DeleteTableProcedure deleteProc = - new DeleteTableProcedure(procExec.getEnvironment(), TABLE_NAME); - ProcedureTestingUtility.submitAndWait(procExec, deleteProc); - Assert.assertFalse(UTIL.getAdmin().tableExists(TABLE_NAME)); - MasterProcedureTestingUtility.validateTableDeletion(UTIL.getMiniHBaseCluster().getMaster(), - TABLE_NAME); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index 0bb54cc190e3..bed41f4da86c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem; import 
org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; @@ -245,8 +244,7 @@ public CreateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.CreateTableState state) - throws InterruptedException, ProcedureSuspendedException { + MasterProcedureProtos.CreateTableState state) throws InterruptedException { if ( !failOnce && state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT From 5a404c49504edfef250b248228093823942c5e1b Mon Sep 17 00:00:00 2001 From: chaijunjie0101 <64140218+chaijunjie0101@users.noreply.github.com> Date: Sun, 21 Apr 2024 19:39:32 +0800 Subject: [PATCH 332/514] HBASE-28150 CreateTableProcedure and DeleteTableProcedure should sleep a while before retrying (#5502) Signed-off-by: Duo Zhang --- .../procedure/CreateTableProcedure.java | 24 ++++- .../procedure/DeleteTableProcedure.java | 24 ++++- ...BadMasterObserverForCreateDeleteTable.java | 55 ++++++++++++ ...stCreateDeleteTableProcedureWithRetry.java | 88 +++++++++++++++++++ .../procedure/TestCreateTableProcedure.java | 4 +- 5 files changed, 190 insertions(+), 5 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 17998fec7bd7..23ad3b42aef0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerValidationUtils; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,6 +54,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class CreateTableProcedure extends 
AbstractStateMachineTableProcedure { @@ -60,6 +64,7 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure newRegions; + private RetryCounter retryCounter; public CreateTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -80,7 +85,7 @@ public CreateTableProcedure(final MasterProcedureEnv env, final TableDescriptor @Override protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state) - throws InterruptedException { + throws InterruptedException, ProcedureSuspendedException { LOG.info("{} execute state={}", this, state); try { switch (state) { @@ -131,6 +136,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS break; case CREATE_TABLE_POST_OPERATION: postCreate(env); + retryCounter = null; return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); @@ -139,12 +145,26 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS if (isRollbackSupported(state)) { setFailure("master-create-table", e); } else { - LOG.warn("Retriable error trying to create table=" + getTableName() + " state=" + state, e); + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("Retriable error trying to create table={},state={},suspend {}secs.", + getTableName(), state, backoff / 1000, e); + throw suspend(Math.toIntExact(backoff), true); } } + retryCounter = null; return Flow.HAS_MORE_STATE; } + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + @Override protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 80fb5d0534d4..8c2f1067c952 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -41,9 +41,12 @@ import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,6 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @InterfaceAudience.Private public class DeleteTableProcedure extends AbstractStateMachineTableProcedure { @@ -61,6 +65,7 @@ public class DeleteTableProcedure extends 
AbstractStateMachineTableProcedure regions; private TableName tableName; + private RetryCounter retryCounter; public DeleteTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -79,7 +84,7 @@ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableN @Override protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) - throws InterruptedException { + throws InterruptedException, ProcedureSuspendedException { if (LOG.isTraceEnabled()) { LOG.trace(this + " execute state=" + state); } @@ -124,6 +129,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s break; case DELETE_TABLE_POST_OPERATION: postDelete(env); + retryCounter = null; LOG.debug("Finished {}", this); return Flow.NO_MORE_STATE; default: @@ -133,12 +139,26 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s if (isRollbackSupported(state)) { setFailure("master-delete-table", e); } else { - LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e); + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.warn("Retriable error trying to delete table={},state={},suspend {}secs.", + getTableName(), state, backoff / 1000, e); + throw suspend(Math.toIntExact(backoff), true); } } + retryCounter = null; return Flow.HAS_MORE_STATE; } + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + @Override protected boolean abort(MasterProcedureEnv env) { // TODO: Current behavior is: with no rollback and no abort support, procedure may get stuck diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java new file mode 100644 index 000000000000..454a24e198aa --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/BadMasterObserverForCreateDeleteTable.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.coprocessor; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; + +/** + * A bad Master Observer to prevent user to create/delete table once. 
+ */ +public class BadMasterObserverForCreateDeleteTable implements MasterObserver, MasterCoprocessor { + private boolean createFailedOnce = false; + private boolean deleteFailedOnce = false; + + @Override + public void postCompletedCreateTableAction(ObserverContext ctx, + TableDescriptor desc, RegionInfo[] regions) throws IOException { + if (!createFailedOnce && !desc.getTableName().isSystemTable()) { + createFailedOnce = true; + throw new IOException("execute postCompletedCreateTableAction failed once."); + } + } + + @Override + public void postCompletedDeleteTableAction(ObserverContext ctx, + TableName tableName) throws IOException { + if (!deleteFailedOnce && !tableName.isSystemTable()) { + deleteFailedOnce = true; + throw new IOException("execute postCompletedDeleteTableAction failed once."); + } + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java new file mode 100644 index 000000000000..3491aa639844 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateDeleteTableProcedureWithRetry.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.BadMasterObserverForCreateDeleteTable; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestCreateDeleteTableProcedureWithRetry { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCreateDeleteTableProcedureWithRetry.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static final TableName TABLE_NAME = + TableName.valueOf(TestCreateDeleteTableProcedureWithRetry.class.getSimpleName()); + + private static final String CF = "cf"; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + BadMasterObserverForCreateDeleteTable.class.getName()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testCreateDeleteTableRetry() throws IOException { + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TABLE_NAME, CF); + RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); + CreateTableProcedure createProc = + new CreateTableProcedure(procExec.getEnvironment(), htd, regions); + ProcedureTestingUtility.submitAndWait(procExec, createProc); + Assert.assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME)); + MasterProcedureTestingUtility.validateTableCreation(UTIL.getMiniHBaseCluster().getMaster(), + TABLE_NAME, regions, CF); + + UTIL.getAdmin().disableTable(TABLE_NAME); + DeleteTableProcedure deleteProc = + new DeleteTableProcedure(procExec.getEnvironment(), TABLE_NAME); + ProcedureTestingUtility.submitAndWait(procExec, deleteProc); + Assert.assertFalse(UTIL.getAdmin().tableExists(TABLE_NAME)); + MasterProcedureTestingUtility.validateTableDeletion(UTIL.getMiniHBaseCluster().getMaster(), + TABLE_NAME); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index bed41f4da86c..0bb54cc190e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterFileSystem; import 
org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; @@ -244,7 +245,8 @@ public CreateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.CreateTableState state) throws InterruptedException { + MasterProcedureProtos.CreateTableState state) + throws InterruptedException, ProcedureSuspendedException { if ( !failOnce && state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT From e3761baec1158d617c46bbdf54725206544717e9 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 23 Apr 2024 16:05:26 +0800 Subject: [PATCH 333/514] HBASE-28436 Use connection url to specify the connection registry information (#5770) Signed-off-by: Istvan Toth Signed-off-by: Nick Dimiduk Reviewed-by: Bryan Beaudreault --- hbase-client/pom.xml | 5 + .../hbase/client/ConnectionFactory.java | 313 +++++++++++++++--- .../client/ConnectionRegistryFactory.java | 64 +++- .../client/ConnectionRegistryURIFactory.java | 42 +++ .../client/RpcConnectionRegistryCreator.java | 49 +++ .../client/ZKConnectionRegistryCreator.java | 52 +++ ....hbase.client.ConnectionRegistryURIFactory | 17 + ...stConnectionRegistryCreatorUriParsing.java | 157 +++++++++ .../client/ClusterConnectionFactory.java | 2 +- .../client/AbstractTestRegionLocator.java | 2 +- .../TestAsyncAdminWithRegionReplicas.java | 2 +- .../client/TestAsyncMetaRegionLocator.java | 3 +- .../client/TestAsyncNonMetaRegionLocator.java | 2 +- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 2 +- .../hbase/client/TestAsyncRegionLocator.java | 2 +- ...stAsyncSingleRequestRpcRetryingCaller.java | 2 +- .../client/TestAsyncTableUseMetaReplicas.java | 3 +- ...riteWithDifferentConnectionRegistries.java | 177 ++++++++++ ...talogReplicaLoadBalanceSimpleSelector.java | 3 +- .../client/TestMetaRegionLocationCache.java | 3 +- 20 files changed, 828 insertions(+), 74 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java create mode 100644 hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBasicReadWriteWithDifferentConnectionRegistries.java diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index d4ee67f0511c..ea99023e959c 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -170,6 +170,11 @@ mockito-core test + + org.mockito + mockito-inline + test + org.hamcrest hamcrest-library diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 716fb4863fe8..f4ef4496dfcf 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.lang.reflect.Constructor; +import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.Collections; import java.util.Map; @@ -89,41 +90,55 @@ protected ConnectionFactory() { * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection();
-   * Table table = connection.getTable(TableName.valueOf("mytable"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection();
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
    * 
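The try-with-resources pattern that the updated javadoc above recommends, written out as a self-contained sketch (the table name, row key and the use of a simple Get are illustrative only, not part of the patch):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionUsageSketch {
  public static void main(String[] args) throws IOException {
    // Connection and Table are both AutoCloseable, so try-with-resources closes them
    // in reverse order and replaces the explicit finally { table.close(); connection.close(); }
    // blocks shown in the old javadoc.
    try (Connection connection = ConnectionFactory.createConnection();
      Table table = connection.getTable(TableName.valueOf("table1"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      // work with result ...
    }
  }
}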
* * @return Connection object for conf */ public static Connection createConnection() throws IOException { - Configuration conf = HBaseConfiguration.create(); - return createConnection(conf, null, AuthUtil.loginClient(conf)); + return createConnection(HBaseConfiguration.create()); + } + + /** + * Create a new Connection instance using default HBaseConfiguration. Connection encapsulates all + * housekeeping for a connection to the cluster. All tables and interfaces created from returned + * connection share zookeeper connection, meta cache, and connections to region servers and + * masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(connectionUri);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
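A sketch of how a client might call the new URI-based overload; the hbase+zk and hbase+rpc scheme names are assumed from the ZKConnectionRegistryCreator and RpcConnectionRegistryCreator classes added by this patch, and all host names and ports are placeholders:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectionUriSketch {
  public static void main(String[] args) throws IOException {
    // ZooKeeper based connection registry: quorum hosts plus the znode parent as the path.
    URI zkUri = URI.create("hbase+zk://zk1:2181,zk2:2181,zk3:2181/hbase");
    // An RPC (bootstrap server) based registry would instead use something like:
    // URI rpcUri = URI.create("hbase+rpc://master1:16020,master2:16020");
    try (Connection connection = ConnectionFactory.createConnection(zkUri);
      Table table = connection.getTable(TableName.valueOf("table1"))) {
      // read and write through table ...
    }
  }
}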
+ * + * @param connectionUri the connection uri for the hbase cluster + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri) throws IOException { + return createConnection(connectionUri, HBaseConfiguration.create()); } /** * Create a new Connection instance using the passed conf instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces - * created from returned connection share zookeeper connection, meta cache, and connections to - * region servers and masters.
+ * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection(conf);
-   * Table table = connection.getTable(TableName.valueOf("mytable"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
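For comparison with the URI form introduced above, a minimal sketch of the existing Configuration-driven way of pointing a client at a cluster (all property values are placeholders):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionConfSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // With this overload the connection registry is resolved from configuration keys
    // rather than from a connection URI.
    conf.set("hbase.zookeeper.quorum", "zk1,zk2,zk3");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    conf.set("zookeeper.znode.parent", "/hbase");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // use the connection ...
    }
  }
}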
    * 
* @@ -137,20 +152,41 @@ public static Connection createConnection(Configuration conf) throws IOException /** * Create a new Connection instance using the passed conf instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces - * created from returned connection share zookeeper connection, meta cache, and connections to - * region servers and masters.
+ * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection(conf);
-   * Table table = connection.getTable(TableName.valueOf("mytable"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
+ * + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri, Configuration conf) + throws IOException { + return createConnection(connectionUri, conf, null, AuthUtil.loginClient(conf)); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
    * 
* @@ -166,20 +202,42 @@ public static Connection createConnection(Configuration conf, ExecutorService po /** * Create a new Connection instance using the passed conf instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces - * created from returned connection share zookeeper connection, meta cache, and connections to - * region servers and masters.
+ * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection(conf);
-   * Table table = connection.getTable(TableName.valueOf("table1"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
+ * + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param pool the thread pool to use for batch operations + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri, Configuration conf, + ExecutorService pool) throws IOException { + return createConnection(connectionUri, conf, pool, AuthUtil.loginClient(conf)); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
    * 
* @@ -194,20 +252,42 @@ public static Connection createConnection(Configuration conf, User user) throws /** * Create a new Connection instance using the passed conf instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces - * created from returned connection share zookeeper connection, meta cache, and connections to - * region servers and masters.
+ * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection(conf);
-   * Table table = connection.getTable(TableName.valueOf("table1"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
+ * + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param user the user the connection is for + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri, Configuration conf, User user) + throws IOException { + return createConnection(connectionUri, conf, null, user); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
    * 
* @@ -224,20 +304,43 @@ public static Connection createConnection(Configuration conf, ExecutorService po /** * Create a new Connection instance using the passed conf instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces - * created from returned connection share zookeeper connection, meta cache, and connections to - * region servers and masters.
+ * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
* The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * *
-   * Connection connection = ConnectionFactory.createConnection(conf);
-   * Table table = connection.getTable(TableName.valueOf("table1"));
-   * try {
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
+ * + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param user the user the connection is for + * @param pool the thread pool to use for batch operations + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri, Configuration conf, + ExecutorService pool, User user) throws IOException { + return createConnection(connectionUri, conf, pool, user, Collections.emptyMap()); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
    *   table.get(...);
    *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
    * }
    * 
* @@ -249,6 +352,37 @@ public static Connection createConnection(Configuration conf, ExecutorService po */ public static Connection createConnection(Configuration conf, ExecutorService pool, final User user, Map connectionAttributes) throws IOException { + return createConnection(null, conf, pool, user, connectionAttributes); + } + + /** + * Create a new Connection instance using the passed conf instance. Connection + * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces + * created from returned connection share zookeeper connection(if used), meta cache, and + * connections to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned connection + * instance. Typical usage: + * + *
+   * try (Connection connection = ConnectionFactory.createConnection(conf);
+   *   Table table = connection.getTable(TableName.valueOf("table1"))) {
+   *   table.get(...);
+   *   ...
+   * }
+   * 
+ * + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param user the user the connection is for + * @param pool the thread pool to use for batch operations + * @param connectionAttributes attributes to be sent along to server during connection establish + * @return Connection object for conf + */ + public static Connection createConnection(URI connectionUri, Configuration conf, + ExecutorService pool, final User user, Map connectionAttributes) + throws IOException { Class clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionOverAsyncConnection.class, Connection.class); if (clazz != ConnectionOverAsyncConnection.class) { @@ -263,7 +397,7 @@ public static Connection createConnection(Configuration conf, ExecutorService po throw new IOException(e); } } else { - return FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes)) + return FutureUtils.get(createAsyncConnection(connectionUri, conf, user, connectionAttributes)) .toConnection(); } } @@ -277,6 +411,16 @@ public static CompletableFuture createAsyncConnection() { return createAsyncConnection(HBaseConfiguration.create()); } + /** + * Call {@link #createAsyncConnection(URI, Configuration)} using default HBaseConfiguration. + * @param connectionUri the connection uri for the hbase cluster + * @see #createAsyncConnection(URI, Configuration) + * @return AsyncConnection object wrapped by CompletableFuture + */ + public static CompletableFuture createAsyncConnection(URI connectionUri) { + return createAsyncConnection(connectionUri, HBaseConfiguration.create()); + } + /** * Call {@link #createAsyncConnection(Configuration, User)} using the given {@code conf} and a * User object created by {@link UserProvider}. The given {@code conf} will also be used to @@ -287,6 +431,21 @@ public static CompletableFuture createAsyncConnection() { * @see UserProvider */ public static CompletableFuture createAsyncConnection(Configuration conf) { + return createAsyncConnection(null, conf); + } + + /** + * Call {@link #createAsyncConnection(Configuration, User)} using the given {@code connectionUri}, + * {@code conf} and a User object created by {@link UserProvider}. The given {@code conf} will + * also be used to initialize the {@link UserProvider}. + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @return AsyncConnection object wrapped by CompletableFuture + * @see #createAsyncConnection(Configuration, User) + * @see UserProvider + */ + public static CompletableFuture createAsyncConnection(URI connectionUri, + Configuration conf) { User user; try { user = AuthUtil.loginClient(conf); @@ -295,7 +454,7 @@ public static CompletableFuture createAsyncConnection(Configura future.completeExceptionally(e); return future; } - return createAsyncConnection(conf, user); + return createAsyncConnection(connectionUri, conf, user); } /** @@ -315,7 +474,28 @@ public static CompletableFuture createAsyncConnection(Configura */ public static CompletableFuture createAsyncConnection(Configuration conf, final User user) { - return createAsyncConnection(conf, user, null); + return createAsyncConnection(null, conf, user); + } + + /** + * Create a new AsyncConnection instance using the passed {@code connectionUri}, {@code conf} and + * {@code user}. AsyncConnection encapsulates all housekeeping for a connection to the cluster. 
+ * All tables and interfaces created from returned connection share zookeeper connection(if used), + * meta cache, and connections to region servers and masters. + *

+ * The caller is responsible for calling {@link AsyncConnection#close()} on the returned + * connection instance. + *

+ * Usually you should only create one AsyncConnection instance in your code and use it everywhere + * as it is thread safe. + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param user the user the asynchronous connection is for + * @return AsyncConnection object wrapped by CompletableFuture + */ + public static CompletableFuture createAsyncConnection(URI connectionUri, + Configuration conf, final User user) { + return createAsyncConnection(connectionUri, conf, user, null); } /** @@ -336,9 +516,38 @@ public static CompletableFuture createAsyncConnection(Configura */ public static CompletableFuture createAsyncConnection(Configuration conf, final User user, Map connectionAttributes) { + return createAsyncConnection(null, conf, user, connectionAttributes); + } + + /** + * Create a new AsyncConnection instance using the passed {@code connectionUri}, {@code conf} and + * {@code user}. AsyncConnection encapsulates all housekeeping for a connection to the cluster. + * All tables and interfaces created from returned connection share zookeeper connection(if used), + * meta cache, and connections to region servers and masters. + *

+ * The caller is responsible for calling {@link AsyncConnection#close()} on the returned + * connection instance. + *

+ * Usually you should only create one AsyncConnection instance in your code and use it everywhere + * as it is thread safe. + * @param connectionUri the connection uri for the hbase cluster + * @param conf configuration + * @param user the user the asynchronous connection is for + * @param connectionAttributes attributes to be sent along to server during connection establish + * @return AsyncConnection object wrapped by CompletableFuture + */ + public static CompletableFuture createAsyncConnection(URI connectionUri, + Configuration conf, final User user, Map connectionAttributes) { return TraceUtil.tracedFuture(() -> { + ConnectionRegistry registry; + try { + registry = connectionUri != null + ? ConnectionRegistryFactory.create(connectionUri, conf, user) + : ConnectionRegistryFactory.create(conf, user); + } catch (Exception e) { + return FutureUtils.failedFuture(e); + } CompletableFuture future = new CompletableFuture<>(); - ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf, user); addListener(registry.getClusterId(), (clusterId, error) -> { if (error != null) { registry.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index 415d46397b8f..5eef2c5f93e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -17,27 +17,77 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; - +import java.io.IOException; +import java.net.URI; +import java.util.ServiceLoader; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** - * Factory class to get the instance of configured connection registry. + * The entry point for creating a {@link ConnectionRegistry}. */ @InterfaceAudience.Private final class ConnectionRegistryFactory { + private static final Logger LOG = LoggerFactory.getLogger(ConnectionRegistryFactory.class); + + private static final ImmutableMap CREATORS; + static { + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (ConnectionRegistryURIFactory factory : ServiceLoader + .load(ConnectionRegistryURIFactory.class)) { + builder.put(factory.getScheme().toLowerCase(), factory); + } + // throw IllegalArgumentException if there are duplicated keys + CREATORS = builder.buildOrThrow(); + } + private ConnectionRegistryFactory() { } - /** Returns The connection registry implementation to use. */ - static ConnectionRegistry getRegistry(Configuration conf, User user) { + /** + * Returns the connection registry implementation to use, for the given connection url + * {@code uri}. + *

+ * We use {@link ServiceLoader} to load different implementations, and use the scheme of the given + * {@code uri} to select. And if there is no protocol specified, or we can not find a + * {@link ConnectionRegistryURIFactory} implementation for the given scheme, we will fallback to + * use the old way to create the {@link ConnectionRegistry}. Notice that, if fallback happens, the + * specified connection url {@code uri} will not take effect, we will load all the related + * configurations from the given Configuration instance {@code conf} + */ + static ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException { + if (StringUtils.isBlank(uri.getScheme())) { + LOG.warn("No scheme specified for {}, fallback to use old way", uri); + return create(conf, user); + } + ConnectionRegistryURIFactory creator = CREATORS.get(uri.getScheme().toLowerCase()); + if (creator == null) { + LOG.warn("No creator registered for {}, fallback to use old way", uri); + return create(conf, user); + } + return creator.create(uri, conf, user); + } + + /** + * Returns the connection registry implementation to use. + *

+ * This is used when we do not have a connection url, we will use the old way to load the + * connection registry, by checking the + * {@literal HConstants#CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY} configuration. + */ + static ConnectionRegistry create(Configuration conf, User user) { Class clazz = - conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, - ConnectionRegistry.class); + conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + RpcConnectionRegistry.class, ConnectionRegistry.class); return ReflectionUtils.newInstance(clazz, conf, user); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java new file mode 100644 index 000000000000..ab2037a1c138 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * For creating different {@link ConnectionRegistry} implementation. + */ +@InterfaceAudience.Private +public interface ConnectionRegistryURIFactory { + + /** + * Instantiate the {@link ConnectionRegistry} using the given parameters. + */ + ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException; + + /** + * The scheme for this implementation. Used to register this URI factory to the + * {@link ConnectionRegistryFactory}. + */ + String getScheme(); +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java new file mode 100644 index 000000000000..cb2338b1429d --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Connection registry creator implementation for creating {@link RpcConnectionRegistry}. + */ +@InterfaceAudience.Private +public class RpcConnectionRegistryCreator implements ConnectionRegistryURIFactory { + + private static final Logger LOG = LoggerFactory.getLogger(RpcConnectionRegistryCreator.class); + + @Override + public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException { + assert getScheme().equals(uri.getScheme()); + LOG.debug("connect to hbase cluster with rpc bootstrap servers='{}'", uri.getAuthority()); + Configuration c = new Configuration(conf); + c.set(RpcConnectionRegistry.BOOTSTRAP_NODES, uri.getAuthority()); + return new RpcConnectionRegistry(c, user); + } + + @Override + public String getScheme() { + return "hbase+rpc"; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java new file mode 100644 index 000000000000..8aa51e04fe4d --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Connection registry creator implementation for creating {@link ZKConnectionRegistry}. 
+ */ +@InterfaceAudience.Private +public class ZKConnectionRegistryCreator implements ConnectionRegistryURIFactory { + + private static final Logger LOG = LoggerFactory.getLogger(ZKConnectionRegistryCreator.class); + + @Override + public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException { + assert getScheme().equals(uri.getScheme()); + LOG.debug("connect to hbase cluster with zk quorum='{}' and parent='{}'", uri.getAuthority(), + uri.getPath()); + Configuration c = new Configuration(conf); + c.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, uri.getAuthority()); + c.set(HConstants.ZOOKEEPER_ZNODE_PARENT, uri.getPath()); + return new ZKConnectionRegistry(c, user); + } + + @Override + public String getScheme() { + return "hbase+zk"; + } +} diff --git a/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory new file mode 100644 index 000000000000..b25a569776f1 --- /dev/null +++ b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +org.apache.hadoop.hbase.client.RpcConnectionRegistryCreator +org.apache.hadoop.hbase.client.ZKConnectionRegistryCreator \ No newline at end of file diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java new file mode 100644 index 000000000000..4dabd894b5b4 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.mockStatic; + +import java.net.URI; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.ArgumentCaptor; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; + +/** + * Make sure we can successfully parse the URI component + */ +@Category({ ClientTests.class, SmallTests.class }) +public class TestConnectionRegistryCreatorUriParsing { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestConnectionRegistryCreatorUriParsing.class); + + private Configuration conf; + + private User user; + + private MockedConstruction mockedRpcRegistry; + + private MockedConstruction mockedZkRegistry; + + private MockedStatic mockedReflectionUtils; + + private List args; + + @Before + public void setUp() { + conf = HBaseConfiguration.create(); + user = mock(User.class); + args = null; + mockedRpcRegistry = mockConstruction(RpcConnectionRegistry.class, (mock, context) -> { + args = context.arguments(); + }); + mockedZkRegistry = mockConstruction(ZKConnectionRegistry.class, (mock, context) -> { + args = context.arguments(); + }); + mockedReflectionUtils = mockStatic(ReflectionUtils.class); + } + + @After + public void tearDown() { + mockedRpcRegistry.closeOnDemand(); + mockedZkRegistry.closeOnDemand(); + mockedReflectionUtils.closeOnDemand(); + } + + @Test + public void testParseRpcSingle() throws Exception { + ConnectionRegistryFactory.create(new URI("hbase+rpc://server1:123"), conf, user); + assertEquals(1, mockedRpcRegistry.constructed().size()); + assertSame(user, args.get(1)); + Configuration conf = (Configuration) args.get(0); + assertEquals("server1:123", conf.get(RpcConnectionRegistry.BOOTSTRAP_NODES)); + } + + @Test + public void testParseRpcMultiple() throws Exception { + ConnectionRegistryFactory.create(new URI("hbase+rpc://server1:123,server2:456,server3:789"), + conf, user); + assertEquals(1, mockedRpcRegistry.constructed().size()); + assertSame(user, args.get(1)); + Configuration conf = (Configuration) args.get(0); + assertEquals("server1:123,server2:456,server3:789", + conf.get(RpcConnectionRegistry.BOOTSTRAP_NODES)); + } + + @Test + public void testParseZkSingle() throws Exception { + ConnectionRegistryFactory.create(new URI("hbase+zk://server1:123/root"), conf, user); + assertEquals(1, mockedZkRegistry.constructed().size()); + assertSame(user, args.get(1)); + Configuration conf = (Configuration) args.get(0); + assertEquals("server1:123", conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM)); + assertEquals("/root", conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + } + + @Test + public void testParseZkMultiple() throws Exception { + ConnectionRegistryFactory + .create(new 
URI("hbase+zk://server1:123,server2:456,server3:789/root/path"), conf, user); + assertEquals(1, mockedZkRegistry.constructed().size()); + assertSame(user, args.get(1)); + Configuration conf = (Configuration) args.get(0); + assertEquals("server1:123,server2:456,server3:789", + conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM)); + assertEquals("/root/path", conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + } + + @Test + public void testFallbackNoScheme() throws Exception { + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class, + ConnectionRegistry.class); + ConnectionRegistryFactory.create(new URI("server1:2181/path"), conf, user); + ArgumentCaptor> clazzCaptor = ArgumentCaptor.forClass(Class.class); + ArgumentCaptor argsCaptor = ArgumentCaptor.forClass(Object[].class); + mockedReflectionUtils + .verify(() -> ReflectionUtils.newInstance(clazzCaptor.capture(), argsCaptor.capture())); + assertEquals(ZKConnectionRegistry.class, clazzCaptor.getValue()); + assertSame(conf, argsCaptor.getValue()[0]); + assertSame(user, argsCaptor.getValue()[1]); + } + + @Test + public void testFallbackNoCreator() throws Exception { + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, + ConnectionRegistry.class); + ConnectionRegistryFactory.create(new URI("hbase+tls://server1:123/path"), conf, user); + ArgumentCaptor> clazzCaptor = ArgumentCaptor.forClass(Class.class); + ArgumentCaptor argsCaptor = ArgumentCaptor.forClass(Object[].class); + mockedReflectionUtils + .verify(() -> ReflectionUtils.newInstance(clazzCaptor.capture(), argsCaptor.capture())); + assertEquals(RpcConnectionRegistry.class, clazzCaptor.getValue()); + assertSame(conf, argsCaptor.getValue()[0]); + assertSame(user, argsCaptor.getValue()[1]); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index 7225f92b7ff9..ed90863763a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -64,7 +64,7 @@ private static AsyncClusterConnection createAsyncClusterConnection(Configuration */ public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf, SocketAddress localAddress, User user) throws IOException { - return createAsyncClusterConnection(conf, ConnectionRegistryFactory.getRegistry(conf, user), + return createAsyncClusterConnection(conf, ConnectionRegistryFactory.create(conf, user), localAddress, user); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 0ff105743e0c..031dff736c84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -60,7 +60,7 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.getAdmin().createTable(td, SPLIT_KEYS); UTIL.waitTableAvailable(TABLE_NAME); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration(), User.getCurrent())) { + ConnectionRegistryFactory.create(UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } 
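As a rough usage sketch of the connection URI support added above (the "hbase+rpc" and "hbase+zk" schemes come from the creator classes in this patch; the host names, ports, znode path and table name below are placeholders, not values from the patch):

// Hypothetical example class, not part of the patch.
import java.net.URI;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionUriExample {
  public static void main(String[] args) throws Exception {
    // Bootstrap the connection registry over RPC; handled by RpcConnectionRegistryCreator ("hbase+rpc").
    URI rpcUri = new URI("hbase+rpc://master1:16000,master2:16000");
    try (Connection connection = ConnectionFactory.createConnection(rpcUri);
      Table table = connection.getTable(TableName.valueOf("table1"))) {
      table.get(new Get(Bytes.toBytes("row1")));
    }
    // Bootstrap from a ZooKeeper quorum plus parent znode; handled by ZKConnectionRegistryCreator ("hbase+zk").
    URI zkUri = new URI("hbase+zk://zk1:2181,zk2:2181,zk3:2181/hbase");
    try (AsyncConnection asyncConnection = ConnectionFactory.createAsyncConnection(zkUri).get()) {
      // asyncConnection.getTable(...), asyncConnection.getAdmin(), ... as usual
    }
  }
}

If the URI carries no scheme, or no ConnectionRegistryURIFactory is registered for that scheme, ConnectionRegistryFactory falls back to the configuration-driven registry, so the sketch above degrades to the pre-existing behaviour.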
UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index da400f29c0c6..bb0eb31d2549 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -56,7 +56,7 @@ public static void setUpBeforeClass() throws Exception { TestAsyncAdminBase.setUpBeforeClass(); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent())) { + ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 90d2cb51e8cf..e14cd32a3889 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -107,8 +107,7 @@ protected void before() throws Throwable { testUtil = miniClusterRule.getTestingUtility(); HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); testUtil.waitUntilNoRegionsInTransition(); - registry = - ConnectionRegistryFactory.getRegistry(testUtil.getConfiguration(), User.getCurrent()); + registry = ConnectionRegistryFactory.create(testUtil.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(testUtil, registry); admin.balancerSwitch(false).get(); locator = new AsyncMetaRegionLocator(registry); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index a6d0ab81f912..6a5230b3a128 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -128,7 +128,7 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I // Enable meta replica LoadBalance mode for this connection. 
c.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, metaReplicaMode.toString()); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); conn = new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent()); locator = new AsyncNonMetaRegionLocator(conn); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 50c9ab9f5657..439d527effca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -125,7 +125,7 @@ public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); TEST_UTIL.getAdmin().balancerSwitch(false, true); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = new AsyncNonMetaRegionLocator(CONN); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index bacd7bb32d70..2291c28a7c85 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -100,7 +100,7 @@ public static void setUp() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = CONN.getLocator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index 3c8327145f32..baa4ee74ade9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -73,7 +73,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), null, User.getCurrent()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java 
index 0de59a4c32bf..2803db20e710 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -95,8 +95,7 @@ public static void setUp() throws Exception { FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); - try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(conf, User.getCurrent())) { + try (ConnectionRegistry registry = ConnectionRegistryFactory.create(conf, User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBasicReadWriteWithDifferentConnectionRegistries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBasicReadWriteWithDifferentConnectionRegistries.java new file mode 100644 index 000000000000..5746ffa67f6c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBasicReadWriteWithDifferentConnectionRegistries.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertFalse; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test basic read write operation with different {@link ConnectionRegistry} implementations. 
+ */ +@RunWith(Parameterized.class) +@Category({ MediumTests.class, ClientTests.class }) +public class TestBasicReadWriteWithDifferentConnectionRegistries { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBasicReadWriteWithDifferentConnectionRegistries.class); + + private static final Logger LOG = + LoggerFactory.getLogger(TestBasicReadWriteWithDifferentConnectionRegistries.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + public enum RegistryImpl { + ZK, + RPC, + ZK_URI, + RPC_URI + } + + @Parameter + public RegistryImpl impl; + + @Rule + public final TableNameTestRule name = new TableNameTestRule(); + + private byte[] FAMILY = Bytes.toBytes("family"); + + private Connection conn; + + @Parameters(name = "{index}: impl={0}") + public static List data() { + List data = new ArrayList(); + for (RegistryImpl impl : RegistryImpl.values()) { + data.add(new Object[] { impl }); + } + return data; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + UTIL.startMiniCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + switch (impl) { + case ZK: { + Configuration conf = HBaseConfiguration.create(); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class, ConnectionRegistry.class); + String quorum = UTIL.getZkCluster().getAddress().toString(); + String path = UTIL.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT); + conf.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, quorum); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, path); + LOG.info("connect to cluster through zk quorum={} and parent={}", quorum, path); + conn = ConnectionFactory.createConnection(conf); + break; + } + case RPC: { + Configuration conf = HBaseConfiguration.create(); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + RpcConnectionRegistry.class, ConnectionRegistry.class); + String bootstrapServers = + UTIL.getMiniHBaseCluster().getMaster().getServerName().getAddress().toString(); + conf.set(RpcConnectionRegistry.BOOTSTRAP_NODES, bootstrapServers); + LOG.info("connect to cluster through rpc bootstrap servers={}", bootstrapServers); + conn = ConnectionFactory.createConnection(conf); + break; + } + case ZK_URI: { + String quorum = UTIL.getZkCluster().getAddress().toString(); + String path = UTIL.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT); + URI connectionUri = new URI("hbase+zk://" + quorum + path); + LOG.info("connect to cluster through connection url: {}", connectionUri); + conn = ConnectionFactory.createConnection(connectionUri); + break; + } + case RPC_URI: { + URI connectionUri = new URI("hbase+rpc://" + + UTIL.getMiniHBaseCluster().getMaster().getServerName().getAddress().toString()); + LOG.info("connect to cluster through connection url: {}", connectionUri); + conn = ConnectionFactory.createConnection(connectionUri); + break; + } + default: + throw new IllegalArgumentException("Unknown impl: " + impl); + } + try (Admin admin = conn.getAdmin()) { + admin.createTable(TableDescriptorBuilder.newBuilder(name.getTableName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build()); + } + } + + @After + public void tearDown() throws Exception { + TableName tableName = name.getTableName(); + try (Admin admin = conn.getAdmin()) { + admin.disableTable(tableName); + admin.deleteTable(tableName); + } + 
conn.close(); + } + + @Test + public void testReadWrite() throws Exception { + byte[] row = Bytes.toBytes("row"); + byte[] qualifier = Bytes.toBytes("qualifier"); + byte[] value = Bytes.toBytes("value"); + try (Table table = conn.getTable(name.getTableName())) { + Put put = new Put(row).addColumn(FAMILY, qualifier, value); + table.put(put); + Result result = table.get(new Get(row)); + assertArrayEquals(value, result.getValue(FAMILY, qualifier)); + table.delete(new Delete(row)); + assertFalse(table.exists(new Get(row))); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 5c78e53f7e60..12f278ebbfd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -77,8 +77,7 @@ public static void setUp() throws Exception { () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() >= numOfMetaReplica); - registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), null, User.getCurrent()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index beb054eaf366..29223dea5dbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -64,8 +64,7 @@ public class TestMetaRegionLocationCache { public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); - REGISTRY = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration(), User.getCurrent()); + REGISTRY = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); } From 6c6e776eea6ebd62a3a030a1820c4eef2636553c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 23 Apr 2024 21:57:47 +0800 Subject: [PATCH 334/514] HBASE-28529 Use ZKClientConfig instead of system properties when setting zookeeper configurations (#5835) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Wellington Chevreuil Reviewed-by: Andor Molnár Reviewed-by: BukrosSzabolcs --- .../hbase/zookeeper/ReadOnlyZKClient.java | 13 +++- .../hadoop/hbase/zookeeper/ZKConfig.java | 29 ++++--- .../hadoop/hbase/zookeeper/TestZKConfig.java | 47 +----------- .../hbase/zookeeper/RecoverableZooKeeper.java | 75 ++++++++----------- .../hadoop/hbase/zookeeper/ZKWatcher.java | 4 +- .../zookeeper/TestRecoverableZooKeeper.java | 2 +- 6 files changed, 62 insertions(+), 108 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 979094fda80b..64b151dc19a5 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -38,6 +38,7 @@ import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.Code; import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.client.ZKClientConfig; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,6 +76,8 @@ public final class ReadOnlyZKClient implements Closeable { private final int keepAliveTimeMs; + private final ZKClientConfig zkClientConfig; + private static abstract class Task implements Delayed { protected long time = System.nanoTime(); @@ -136,10 +139,12 @@ public ReadOnlyZKClient(Configuration conf) { this.retryIntervalMs = conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); + this.zkClientConfig = ZKConfig.getZKClientConfig(conf); LOG.debug( - "Connect {} to {} with session timeout={}ms, retries {}, " - + "retry interval {}ms, keepAlive={}ms", - getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); + "Connect {} to {} with session timeout={}ms, retries={}, " + + "retry interval={}ms, keepAlive={}ms, zk client config={}", + getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs, + zkClientConfig); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); } @@ -316,7 +321,7 @@ private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { - }); + }, zkClientConfig); } return zookeeper; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 87885e2b9fd5..57009eca660e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -26,19 +26,22 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.client.ZKClientConfig; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; /** * Utility methods for reading, and building the ZooKeeper configuration. The order and priority for - * reading the config are as follows: (1). Property with "hbase.zookeeper.property." prefix from - * HBase XML (2). other zookeeper related properties in HBASE XML + * reading the config are as follows: + *

+ * <ol>
+ * <li>Property with "hbase.zookeeper.property." prefix from HBase XML.</li>
+ * <li>other zookeeper related properties in HBASE XML</li>
+ * </ol>
*/ @InterfaceAudience.Private public final class ZKConfig { private static final String VARIABLE_START = "${"; - private static final String ZOOKEEPER_JAVA_PROPERTY_PREFIX = "zookeeper."; private ZKConfig() { } @@ -132,7 +135,6 @@ private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf * @return Quorum servers */ public static String getZKQuorumServersString(Configuration conf) { - setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf); return getZKQuorumServersStringFromHbaseConfig(conf); } @@ -322,13 +324,19 @@ public String getZnodeParent() { } } + public static ZKClientConfig getZKClientConfig(Configuration conf) { + Properties zkProperties = extractZKPropsFromHBaseConfig(conf); + ZKClientConfig zkClientConfig = new ZKClientConfig(); + zkProperties.forEach((k, v) -> zkClientConfig.setProperty(k.toString(), v.toString())); + return zkClientConfig; + } + /** * Get the client ZK Quorum servers string * @param conf the configuration to read * @return Client quorum servers, or null if not specified */ public static String getClientZKQuorumServersString(Configuration conf) { - setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf); String clientQuromServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); if (clientQuromServers == null) { return null; @@ -341,15 +349,4 @@ public static String getClientZKQuorumServersString(Configuration conf) { final String[] serverHosts = StringUtils.getStrings(clientQuromServers); return buildZKQuorumServerString(serverHosts, clientZkClientPort); } - - private static void setZooKeeperClientSystemProperties(String prefix, Configuration conf) { - Properties zkProperties = extractZKPropsFromHBaseConfig(conf); - for (Entry entry : zkProperties.entrySet()) { - String key = entry.getKey().toString().trim(); - String value = entry.getValue().toString().trim(); - if (System.getProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + key) == null) { - System.setProperty(ZOOKEEPER_JAVA_PROPERTY_PREFIX + key, value); - } - } - } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java index 63df9043bae3..2a7b7bc27683 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.zookeeper.client.ZKClientConfig; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -100,62 +101,22 @@ public void testClusterKeyWithMultiplePorts() throws Exception { } @Test - public void testZooKeeperTlsPropertiesClient() { + public void testZooKeeperTlsProperties() { // Arrange Configuration conf = HBaseConfiguration.create(); for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + p, p); - String zkprop = "zookeeper." + p; - System.clearProperty(zkprop); } // Act - ZKConfig.getClientZKQuorumServersString(conf); + ZKClientConfig zkClientConfig = ZKConfig.getZKClientConfig(conf); // Assert for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { - String zkprop = "zookeeper." 
+ p; - assertEquals("Invalid or unset system property: " + zkprop, p, System.getProperty(zkprop)); - System.clearProperty(zkprop); + assertEquals("Invalid or unset system property: " + p, p, zkClientConfig.getProperty(p)); } } - @Test - public void testZooKeeperTlsPropertiesServer() { - // Arrange - Configuration conf = HBaseConfiguration.create(); - for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { - conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + p, p); - String zkprop = "zookeeper." + p; - System.clearProperty(zkprop); - } - - // Act - ZKConfig.getZKQuorumServersString(conf); - - // Assert - for (String p : ZOOKEEPER_CLIENT_TLS_PROPERTIES) { - String zkprop = "zookeeper." + p; - assertEquals("Invalid or unset system property: " + zkprop, p, System.getProperty(zkprop)); - System.clearProperty(zkprop); - } - } - - @Test - public void testZooKeeperPropertiesDoesntOverwriteSystem() { - // Arrange - System.setProperty("zookeeper.a.b.c", "foo"); - Configuration conf = HBaseConfiguration.create(); - conf.set(HConstants.ZK_CFG_PROPERTY_PREFIX + "a.b.c", "bar"); - - // Act - ZKConfig.getZKQuorumServersString(conf); - - // Assert - assertEquals("foo", System.getProperty("zookeeper.a.b.c")); - System.clearProperty("zookeeper.a.b.c"); - } - private void testKey(String ensemble, int port, String znode) throws IOException { testKey(ensemble, port, znode, false); // not support multiple client ports } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index f1798b00e315..8537dd12c5b6 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -41,6 +41,7 @@ import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; +import org.apache.zookeeper.client.ZKClientConfig; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.proto.CreateRequest; @@ -49,19 +50,22 @@ import org.slf4j.LoggerFactory; /** - * A zookeeper that can handle 'recoverable' errors. To handle recoverable errors, developers need - * to realize that there are two classes of requests: idempotent and non-idempotent requests. Read - * requests and unconditional sets and deletes are examples of idempotent requests, they can be - * reissued with the same results. (Although, the delete may throw a NoNodeException on reissue its - * effect on the ZooKeeper state is the same.) Non-idempotent requests need special handling, - * application and library writers need to keep in mind that they may need to encode information in - * the data or name of znodes to detect retries. A simple example is a create that uses a sequence - * flag. If a process issues a create("/x-", ..., SEQUENCE) and gets a connection loss exception, - * that process will reissue another create("/x-", ..., SEQUENCE) and get back x-111. When the - * process does a getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be that x-109 - * was the result of the previous create, so the process actually owns both x-109 and x-111. An easy - * way around this is to use "x-process id-" when doing the create. 
If the process is using an id of - * 352, before reissuing the create it will do a getChildren("/") and see "x-222-1", "x-542-30", + * A zookeeper that can handle 'recoverable' errors. + *

+ * To handle recoverable errors, developers need to realize that there are two classes of requests: + * idempotent and non-idempotent requests. Read requests and unconditional sets and deletes are + * examples of idempotent requests, they can be reissued with the same results. + *

+ * (Although, the delete may throw a NoNodeException on reissue its effect on the ZooKeeper state is + * the same.) Non-idempotent requests need special handling, application and library writers need to + * keep in mind that they may need to encode information in the data or name of znodes to detect + * retries. A simple example is a create that uses a sequence flag. If a process issues a + * create("/x-", ..., SEQUENCE) and gets a connection loss exception, that process will reissue + * another create("/x-", ..., SEQUENCE) and get back x-111. When the process does a + * getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be that x-109 was the result + * of the previous create, so the process actually owns both x-109 and x-111. An easy way around + * this is to use "x-process id-" when doing the create. If the process is using an id of 352, + * before reissuing the create it will do a getChildren("/") and see "x-222-1", "x-542-30", * "x-352-109", x-333-110". The process will know that the original create succeeded an the znode it * created is "x-352-109". * @see "https://cwiki.apache.org/confluence/display/HADOOP2/ZooKeeper+ErrorHandling" @@ -79,37 +83,31 @@ public class RecoverableZooKeeper { private final int sessionTimeout; private final String quorumServers; private final int maxMultiSize; + private final ZKClientConfig zkClientConfig; /** - * See {@link #connect(Configuration, String, Watcher, String)} + * See {@link #connect(Configuration, String, Watcher, String, ZKClientConfig)}. */ public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) throws IOException { String ensemble = ZKConfig.getZKQuorumServersString(conf); - return connect(conf, ensemble, watcher); - } - - /** - * See {@link #connect(Configuration, String, Watcher, String)} - */ - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher) - throws IOException { - return connect(conf, ensemble, watcher, null); + return connect(conf, ensemble, watcher, null, null); } /** * Creates a new connection to ZooKeeper, pulling settings and ensemble config from the specified * configuration object using methods from {@link ZKConfig}. Sets the connection status monitoring * watcher to the specified watcher. - * @param conf configuration to pull ensemble and other settings from - * @param watcher watcher to monitor connection changes - * @param ensemble ZooKeeper servers quorum string - * @param identifier value used to identify this client instance. + * @param conf configuration to pull ensemble and other settings from + * @param watcher watcher to monitor connection changes + * @param ensemble ZooKeeper servers quorum string + * @param identifier value used to identify this client instance. 
+ * @param zkClientConfig client specific configurations for this instance * @return connection to zookeeper * @throws IOException if unable to connect to zk or config problem */ public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher, - final String identifier) throws IOException { + final String identifier, ZKClientConfig zkClientConfig) throws IOException { if (ensemble == null) { throw new IOException("Unable to determine ZooKeeper ensemble"); } @@ -122,14 +120,12 @@ public static RecoverableZooKeeper connect(Configuration conf, String ensemble, int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000); int multiMaxSize = conf.getInt("zookeeper.multi.max.size", 1024 * 1024); return new RecoverableZooKeeper(ensemble, timeout, watcher, retry, retryIntervalMillis, - maxSleepTime, identifier, multiMaxSize); + maxSleepTime, identifier, multiMaxSize, zkClientConfig); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", - justification = "None. Its always been this way.") - public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, - int maxRetries, int retryIntervalMillis, int maxSleepTime, String identifier, int maxMultiSize) - throws IOException { + RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, + int retryIntervalMillis, int maxSleepTime, String identifier, int maxMultiSize, + ZKClientConfig zkClientConfig) throws IOException { // TODO: Add support for zk 'chroot'; we don't add it to the quorumServers String as we should. this.retryCounterFactory = new RetryCounterFactory(maxRetries + 1, retryIntervalMillis, maxSleepTime); @@ -147,12 +143,7 @@ public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher wa this.sessionTimeout = sessionTimeout; this.quorumServers = quorumServers; this.maxMultiSize = maxMultiSize; - - try { - checkZk(); - } catch (Exception x) { - /* ignore */ - } + this.zkClientConfig = zkClientConfig; } /** @@ -171,10 +162,10 @@ public int getMaxMultiSizeLimit() { * @return The created ZooKeeper connection object * @throws KeeperException if a ZooKeeper operation fails */ - protected synchronized ZooKeeper checkZk() throws KeeperException { + private synchronized ZooKeeper checkZk() throws KeeperException { if (this.zk == null) { try { - this.zk = new ZooKeeper(quorumServers, sessionTimeout, watcher); + this.zk = new ZooKeeper(quorumServers, sessionTimeout, watcher, zkClientConfig); } catch (IOException ex) { LOG.warn("Unable to create ZooKeeper Connection", ex); throw new KeeperException.OperationTimeoutException(); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index 8f0cfc811b85..3879cb7ba911 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -176,8 +176,8 @@ public ZKWatcher(Configuration conf, String identifier, Abortable abortable, this.abortable = abortable; this.znodePaths = new ZNodePaths(conf); PendingWatcher pendingWatcher = new PendingWatcher(); - this.recoverableZooKeeper = - RecoverableZooKeeper.connect(conf, quorum, pendingWatcher, identifier); + this.recoverableZooKeeper = RecoverableZooKeeper.connect(conf, quorum, pendingWatcher, + identifier, ZKConfig.getZKClientConfig(conf)); pendingWatcher.prepare(this); if 
(canCreateBaseZNode) { try { diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java index 1339b640cede..8ba5fd84479b 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java @@ -76,7 +76,7 @@ public void testSetDataVersionMismatchInLoop() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop", abortable, true); String ensemble = ZKConfig.getZKQuorumServersString(conf); - RecoverableZooKeeper rzk = RecoverableZooKeeper.connect(conf, ensemble, zkw); + RecoverableZooKeeper rzk = RecoverableZooKeeper.connect(conf, ensemble, zkw, null, null); rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); rzk.setData(znode, Bytes.toBytes("OPENING"), 0); Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk"); From 7122da5978a558b8d1d31c5a20b95167f8bfbf8b Mon Sep 17 00:00:00 2001 From: Abhishek Kothalikar <99398985+kabhishek4@users.noreply.github.com> Date: Thu, 25 Apr 2024 15:28:03 +0530 Subject: [PATCH 335/514] HBASE-28517 Make properties dynamically configured (#5823) Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- .../hadoop/hbase/io/hfile/CacheConfig.java | 21 +++++++++++++++---- .../hadoop/hbase/regionserver/HStore.java | 5 ++++- .../hadoop/hbase/regionserver/TestHStore.java | 20 ++++++++++++++++++ 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index f89a6194cefb..7fb1f1ec85bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -20,6 +20,7 @@ import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +31,7 @@ * Stores all of the cache objects and configuration for a single HFile. */ @InterfaceAudience.Private -public class CacheConfig { +public class CacheConfig implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(CacheConfig.class.getName()); /** @@ -124,13 +125,13 @@ public class CacheConfig { * turned off on a per-family or per-request basis). If off we will STILL cache meta blocks; i.e. * INDEX and BLOOM types. This cannot be disabled. 
*/ - private final boolean cacheDataOnRead; + private volatile boolean cacheDataOnRead; /** Whether blocks should be flagged as in-memory when being cached */ private final boolean inMemory; /** Whether data blocks should be cached when new files are written */ - private boolean cacheDataOnWrite; + private volatile boolean cacheDataOnWrite; /** Whether index blocks should be cached when new files are written */ private boolean cacheIndexesOnWrite; @@ -139,7 +140,7 @@ public class CacheConfig { private boolean cacheBloomsOnWrite; /** Whether blocks of a file should be evicted when the file is closed */ - private boolean evictOnClose; + private volatile boolean evictOnClose; /** Whether data blocks should be stored in compressed and/or encrypted form in the cache */ private final boolean cacheDataCompressed; @@ -464,4 +465,16 @@ public String toString() { + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); } + + @Override + public void onConfigurationChange(Configuration conf) { + cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ); + cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE); + evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE); + LOG.info( + "Config hbase.block.data.cacheonread is changed to {}, " + + "hbase.rs.cacheblocksonwrite is changed to {}, " + + "hbase.rs.evictblocksonclose is changed to {}", + cacheDataOnRead, cacheDataOnWrite, evictOnClose); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 43a63359961e..9954c78142e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -2186,7 +2186,10 @@ public void onConfigurationChange(Configuration conf) { */ @Override public void registerChildren(ConfigurationManager manager) { - // No children to register + CacheConfig cacheConfig = this.storeContext.getCacheConf(); + if (cacheConfig != null) { + manager.registerObserver(cacheConfig); + } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 2e999dfaa455..e888639eac4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_DATA_ON_READ_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_CACHE_DATA_ON_READ; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_CACHE_DATA_ON_WRITE; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_CLOSE; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY; import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -2604,6 +2610,9 @@ public void testOnConfigurationChange() throws IOException { Configuration conf = 
HBaseConfiguration.create(); conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, COMMON_MAX_FILES_TO_COMPACT); + conf.setBoolean(CACHE_DATA_ON_READ_KEY, false); + conf.setBoolean(CACHE_BLOCKS_ON_WRITE_KEY, true); + conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family) .setConfiguration(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, String.valueOf(STORE_MAX_FILES_TO_COMPACT)) @@ -2614,8 +2623,19 @@ public void testOnConfigurationChange() throws IOException { conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, NEW_COMMON_MAX_FILES_TO_COMPACT); this.store.onConfigurationChange(conf); + assertEquals(STORE_MAX_FILES_TO_COMPACT, store.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact()); + + assertEquals(conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ), false); + assertEquals(conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE), true); + assertEquals(conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE), true); + + // reset to default values + conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ); + conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE); + conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE); + this.store.onConfigurationChange(conf); } /** From 4a9f6f2fe7b345891c8d37961e7db661042e0e93 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 25 Apr 2024 15:56:42 +0200 Subject: [PATCH 336/514] HBASE-28518 Allow specifying a filter for the REST multiget endpoint (addendum: add back SCAN_FILTER constant) (#5852) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/rest/Constants.java | 6 ++++++ .../java/org/apache/hadoop/hbase/rest/MultiRowResource.java | 1 - .../java/org/apache/hadoop/hbase/rest/TableResource.java | 1 - 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 71080de07dd8..8d606793be64 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -86,6 +86,12 @@ public interface Constants { String SCAN_LIMIT = "limit"; String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize"; String FILTER = "filter"; + /** + * @deprecated Since 2.4.18/2.5.9/2.6.0, will be removed in 4.0.0. Please use {@link #FILTER} + * instead. 
+ */ + @Deprecated + String SCAN_FILTER = FILTER; String FILTER_B64 = "filter_b64"; String SCAN_REVERSED = "reversed"; String SCAN_CACHE_BLOCKS = "cacheblocks"; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 47b3c22a7c95..4c2f6c3216bd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -87,7 +87,6 @@ public Response get(final @Context UriInfo uriInfo, if (paramFilterB64 != null) { filterBytes = base64Urldecoder.decode(paramFilterB64); } else if (paramFilter != null) { - // Not binary clean filterBytes = paramFilter.getBytes(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index fc3fb6bf7057..b311e7b0eb77 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -183,7 +183,6 @@ public TableScanResource getScanResource(final @PathParam("scanspec") String sca if (paramFilterB64 != null) { filterBytes = base64Urldecoder.decode(paramFilterB64); } else if (paramFilter != null) { - // Not binary clean filterBytes = paramFilter.getBytes(); } if (filterBytes != null) { From 4b49e53a91b21875e10ad819656c6856a060c81f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 25 Apr 2024 23:38:18 +0800 Subject: [PATCH 337/514] HBASE-28436 Addendum fix naming issue (#5855) --- .../hbase/client/ConnectionRegistryFactory.java | 12 ++++++------ ...tor.java => RpcConnectionRegistryURIFactory.java} | 4 ++-- ...ator.java => ZKConnectionRegistryURIFactory.java} | 4 ++-- ....hadoop.hbase.client.ConnectionRegistryURIFactory | 4 ++-- ...ng.java => TestConnectionRegistryUriParsing.java} | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{RpcConnectionRegistryCreator.java => RpcConnectionRegistryURIFactory.java} (93%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{ZKConnectionRegistryCreator.java => ZKConnectionRegistryURIFactory.java} (94%) rename hbase-client/src/test/java/org/apache/hadoop/hbase/client/{TestConnectionRegistryCreatorUriParsing.java => TestConnectionRegistryUriParsing.java} (97%) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index 5eef2c5f93e1..af4cf75af7fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -39,7 +39,7 @@ final class ConnectionRegistryFactory { private static final Logger LOG = LoggerFactory.getLogger(ConnectionRegistryFactory.class); - private static final ImmutableMap CREATORS; + private static final ImmutableMap FACTORIES; static { ImmutableMap.Builder builder = ImmutableMap.builder(); for (ConnectionRegistryURIFactory factory : ServiceLoader @@ -47,7 +47,7 @@ final class ConnectionRegistryFactory { builder.put(factory.getScheme().toLowerCase(), factory); } // throw IllegalArgumentException if there are duplicated keys - CREATORS = builder.buildOrThrow(); + FACTORIES = builder.buildOrThrow(); } private ConnectionRegistryFactory() { @@ 
-69,12 +69,12 @@ static ConnectionRegistry create(URI uri, Configuration conf, User user) throws LOG.warn("No scheme specified for {}, fallback to use old way", uri); return create(conf, user); } - ConnectionRegistryURIFactory creator = CREATORS.get(uri.getScheme().toLowerCase()); - if (creator == null) { - LOG.warn("No creator registered for {}, fallback to use old way", uri); + ConnectionRegistryURIFactory factory = FACTORIES.get(uri.getScheme().toLowerCase()); + if (factory == null) { + LOG.warn("No factory registered for {}, fallback to use old way", uri); return create(conf, user); } - return creator.create(uri, conf, user); + return factory.create(uri, conf, user); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java similarity index 93% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java index cb2338b1429d..79081ee6c649 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java @@ -29,9 +29,9 @@ * Connection registry creator implementation for creating {@link RpcConnectionRegistry}. */ @InterfaceAudience.Private -public class RpcConnectionRegistryCreator implements ConnectionRegistryURIFactory { +public class RpcConnectionRegistryURIFactory implements ConnectionRegistryURIFactory { - private static final Logger LOG = LoggerFactory.getLogger(RpcConnectionRegistryCreator.class); + private static final Logger LOG = LoggerFactory.getLogger(RpcConnectionRegistryURIFactory.class); @Override public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java similarity index 94% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java index 8aa51e04fe4d..939adab23b78 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java @@ -30,9 +30,9 @@ * Connection registry creator implementation for creating {@link ZKConnectionRegistry}. 
*/ @InterfaceAudience.Private -public class ZKConnectionRegistryCreator implements ConnectionRegistryURIFactory { +public class ZKConnectionRegistryURIFactory implements ConnectionRegistryURIFactory { - private static final Logger LOG = LoggerFactory.getLogger(ZKConnectionRegistryCreator.class); + private static final Logger LOG = LoggerFactory.getLogger(ZKConnectionRegistryURIFactory.class); @Override public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException { diff --git a/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory index b25a569776f1..229119c6635a 100644 --- a/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory +++ b/hbase-client/src/main/resources/META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory @@ -13,5 +13,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -org.apache.hadoop.hbase.client.RpcConnectionRegistryCreator -org.apache.hadoop.hbase.client.ZKConnectionRegistryCreator \ No newline at end of file +org.apache.hadoop.hbase.client.RpcConnectionRegistryURIFactory +org.apache.hadoop.hbase.client.ZKConnectionRegistryURIFactory \ No newline at end of file diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryUriParsing.java similarity index 97% rename from hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java rename to hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryUriParsing.java index 4dabd894b5b4..c841346e810e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryCreatorUriParsing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryUriParsing.java @@ -46,11 +46,11 @@ * Make sure we can successfully parse the URI component */ @Category({ ClientTests.class, SmallTests.class }) -public class TestConnectionRegistryCreatorUriParsing { +public class TestConnectionRegistryUriParsing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionRegistryCreatorUriParsing.class); + HBaseClassTestRule.forClass(TestConnectionRegistryUriParsing.class); private Configuration conf; From 8828e45b6109e834745d30311e3d6ce808f38537 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 26 Apr 2024 13:57:25 +0800 Subject: [PATCH 338/514] HBASE-28512 Update error prone to 2.26.1 (#5838) Signed-off-by: Xin Sun --- .../java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java | 3 ++- .../org/apache/hadoop/hbase/regionserver/TestRegionInfo.java | 2 +- .../hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java | 2 +- pom.xml | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java index e5835fd65b61..0fe46194083a 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java @@ 
-20,6 +20,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import javax.servlet.http.HttpServletRequest; @@ -94,7 +95,7 @@ public void testRequestQuoting() throws Exception { quoter.getParameterValues("x")); Mockito.doReturn(null).when(mockReq).getParameterValues("x"); - assertArrayEquals("Test that missing parameters dont cause NPE for array", null, + assertNull("Test that missing parameters dont cause NPE for array", quoter.getParameterValues("x")); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java index de177ec3ace8..60fe39ecc77f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java @@ -320,7 +320,7 @@ public void testComparator() { b = RegionInfoBuilder.newBuilder(t).setStartKey(midway).build(); assertTrue(a.compareTo(b) < 0); assertTrue(b.compareTo(a) > 0); - assertEquals(a, a); + assertTrue(a.equals(a)); assertEquals(0, a.compareTo(a)); a = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("a")) .setEndKey(Bytes.toBytes("d")).build(); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 9e9e1554aac0..810bb87baa9e 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -536,7 +536,7 @@ public void testDeleteFamily() throws Exception { get = new TGet(wrap(rowName)); result = handler.get(table, get); - assertArrayEquals(null, result.getRow()); + assertNull(result.getRow()); assertEquals(0, result.getColumnValuesSize()); } diff --git a/pom.xml b/pom.xml index 635afe9b78ef..195d3d739474 100644 --- a/pom.xml +++ b/pom.xml @@ -892,7 +892,7 @@ --> 8.29 3.1.0 - 2.16 + 2.26.1 2.4.2 1.0.0 1.8 From 52e0a8cdaf6054c82faea7dac79ebc75925f8855 Mon Sep 17 00:00:00 2001 From: Nikita Pande <37657012+nikita15p@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:32:57 +0530 Subject: [PATCH 339/514] HBASE-28552 Bump up bouncycastle dependency from 1.76 to 1.78 (#5854) Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 195d3d739474..b750cac0557c 100644 --- a/pom.xml +++ b/pom.xml @@ -877,7 +877,7 @@ 2.2.1 1.0.58 2.12.2 - 1.76 + 1.78 1.5.1 1.0.1 1.1.0 From ba099131ca14ce861b25462f2daba9cce7bc90e4 Mon Sep 17 00:00:00 2001 From: Aman Poonia Date: Sun, 28 Apr 2024 07:16:09 +0530 Subject: [PATCH 340/514] HBASE-28405 Fix failed procedure rollback when region was not closed and is still in state merging (#5799) Co-authored-by: Duo Zhang Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../MergeTableRegionsProcedure.java | 30 +++++++++++++++++-- .../hbase/namespace/TestNamespaceAuditor.java | 4 +-- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 
7d4ec71d35b1..b9a3ee13361d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -291,6 +291,7 @@ protected void rollbackState(final MasterProcedureEnv env, final MergeTableRegio postRollBackMergeRegions(env); break; case MERGE_TABLE_REGIONS_PREPARE: + rollbackPrepareMerge(env); break; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); @@ -514,6 +515,19 @@ private boolean isMergeable(final MasterProcedureEnv env, final RegionState rs) return response.hasMergeable() && response.getMergeable(); } + /** + * Action for rollback a merge table after prepare merge + */ + private void rollbackPrepareMerge(final MasterProcedureEnv env) throws IOException { + for (RegionInfo rinfo : regionsToMerge) { + RegionStateNode regionStateNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(rinfo); + if (regionStateNode.getState() == State.MERGING) { + regionStateNode.setState(State.OPEN); + } + } + } + /** * Pre merge region action * @param env MasterProcedureEnv @@ -639,8 +653,20 @@ private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOExceptio * Rollback close regions **/ private void rollbackCloseRegionsForMerge(MasterProcedureEnv env) throws IOException { - AssignmentManagerUtil.reopenRegionsForRollback(env, Arrays.asList(regionsToMerge), - getRegionReplication(env), getServerName(env)); + // At this point we should check if region was actually closed. If it was not closed then we + // don't need to repoen the region and we can just change the regionNode state to OPEN. + // if it is alredy closed then we need to do a reopen of region + List toAssign = new ArrayList<>(); + for (RegionInfo rinfo : regionsToMerge) { + RegionStateNode regionStateNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(rinfo); + if (regionStateNode.getState() != State.MERGING) { + // same as before HBASE-28405 + toAssign.add(rinfo); + } + } + AssignmentManagerUtil.reopenRegionsForRollback(env, toAssign, getRegionReplication(env), + getServerName(env)); } private TransitRegionStateProcedure[] createUnassignProcedures(MasterProcedureEnv env) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index a9d30fa7fccc..ab17f94389e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; @@ -407,7 +407,7 @@ public boolean evaluate() throws Exception { try { ADMIN.split(tableTwo, 
Bytes.toBytes("6")); fail(); - } catch (DoNotRetryRegionException e) { + } catch (DoNotRetryIOException e) { // Expected } Thread.sleep(2000); From d493e2c1827ff5c9af424a8680ae879cfb73976a Mon Sep 17 00:00:00 2001 From: Vineet Kumar Maheshwari Date: Sun, 28 Apr 2024 19:56:13 +0530 Subject: [PATCH 341/514] HBASE-28482 Reverse scan with tags throws ArrayIndexOutOfBoundsException with DBE in setCurrentBlock flow (#5792) Signed-off-by: Pankaj Kumar Signed-off-by: Bryan Beaudreault --- .../io/encoding/BufferedDataBlockEncoder.java | 10 ++++-- .../hadoop/hbase/regionserver/TestTags.java | 36 +++++++++++++------ 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index f998f40d68b9..1794422d5cd9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -834,6 +834,13 @@ public int compareKey(CellComparator comparator, Cell key) { public void setCurrentBuffer(ByteBuff buffer) { if (this.tagCompressionContext != null) { this.tagCompressionContext.clear(); + + // Prior seekToKeyInBlock may have reset this to false if we fell back to previous + // seeker state. This is an optimization so we don't have to uncompress tags again when + // reading last state. + // In seekBefore flow, if block change happens then rewind is not called and + // setCurrentBuffer is called, so need to uncompress any tags we see. + current.uncompressTags = true; } currentBuffer = buffer; current.currentBuffer = currentBuffer; @@ -876,9 +883,6 @@ public void rewind() { // reading last state. // In case of rewind, we are starting from the beginning of the buffer, so we need // to uncompress any tags we see. - // It may make sense to reset this in setCurrentBuffer as well, but we seem to only call - // setCurrentBuffer after StoreFileScanner.seekAtOrAfter which calls next to consume the - // seeker state. Rewind is called by seekBefore, which doesn't and leaves us in this state. current.uncompressTags = true; } decodeFirst(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 3d45fc56503f..45b927c07afa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -124,32 +124,46 @@ public void testReverseScanWithDBE() throws IOException { try (Connection connection = ConnectionFactory.createConnection(conf)) { for (DataBlockEncoding encoding : DataBlockEncoding.values()) { - testReverseScanWithDBE(connection, encoding, family); + testReverseScanWithDBE(connection, encoding, family, HConstants.DEFAULT_BLOCKSIZE, 10); } } } - private void testReverseScanWithDBE(Connection conn, DataBlockEncoding encoding, byte[] family) - throws IOException { + /** + * Test that we can do reverse scans when writing tags and using DataBlockEncoding. 
Fails with an + * exception for PREFIX, DIFF, and FAST_DIFF + */ + @Test + public void testReverseScanWithDBEWhenCurrentBlockUpdates() throws IOException { + byte[] family = Bytes.toBytes("0"); + + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + + try (Connection connection = ConnectionFactory.createConnection(conf)) { + for (DataBlockEncoding encoding : DataBlockEncoding.values()) { + testReverseScanWithDBE(connection, encoding, family, 1024, 30000); + } + } + } + + private void testReverseScanWithDBE(Connection conn, DataBlockEncoding encoding, byte[] family, + int blockSize, int maxRows) throws IOException { LOG.info("Running test with DBE={}", encoding); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName() + "-" + encoding); - TEST_UTIL.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(family).setDataBlockEncoding(encoding).build()) - .build(), null); + TEST_UTIL.createTable( + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(family).setDataBlockEncoding(encoding).setBlocksize(blockSize).build()).build(), + null); Table table = conn.getTable(tableName); - int maxRows = 10; byte[] val1 = new byte[10]; byte[] val2 = new byte[10]; Bytes.random(val1); Bytes.random(val2); for (int i = 0; i < maxRows; i++) { - if (i == maxRows / 2) { - TEST_UTIL.flush(tableName); - } table.put(new Put(Bytes.toBytes(i)).addColumn(family, Bytes.toBytes(1), val1) .addColumn(family, Bytes.toBytes(2), val2).setTTL(600_000)); } From 4230c42b402f909be9e90e71ae62c5778a14100c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 28 Apr 2024 22:45:52 +0800 Subject: [PATCH 342/514] HBASE-28554 TestZooKeeperScanPolicyObserver and TestAdminShell fail 100% of times on flaky dashboard (#5859) Signed-off-by: Bryan Beaudreault --- .../example/TestZooKeeperScanPolicyObserver.java | 8 +++++++- hbase-shell/src/main/ruby/hbase/admin.rb | 2 -- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index e72374e087b6..da67196661af 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; @@ -83,7 +84,12 @@ public static void tearDown() throws Exception { private void setExpireBefore(long time) throws KeeperException, InterruptedException, IOException { - ZooKeeper zk = UTIL.getZooKeeperWatcher().getRecoverableZooKeeper().getZooKeeper(); + RecoverableZooKeeper recoverableZk = UTIL.getZooKeeperWatcher().getRecoverableZooKeeper(); + // we need to call this for setting up the zookeeper connection + recoverableZk.reconnectAfterExpiration(); + // we have to use the original ZooKeeper as the RecoverableZooKeeper will append a magic prefix + // for the data 
stored on zookeeper + ZooKeeper zk = recoverableZk.getZooKeeper(); if (zk.exists(ZooKeeperScanPolicyObserver.NODE, false) == null) { zk.create(ZooKeeperScanPolicyObserver.NODE, Bytes.toBytes(time), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index b04a79229831..6cbaa385bec7 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -486,8 +486,6 @@ def zk_dump 'admin', nil ) - zk = @zk_wrapper.getRecoverableZooKeeper.getZooKeeper - @zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk) org.apache.hadoop.hbase.zookeeper.ZKDump.dump(@zk_wrapper) end From 0a4daab53931a8c5cd0dd924709eddd90a96ab04 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 30 Apr 2024 12:51:33 +0200 Subject: [PATCH 343/514] HBASE-28523 Use a single get call in REST multiget endpoint (#5862) Signed-off-by: Ankit Singhal --- .../hadoop/hbase/rest/MultiRowResource.java | 36 ++++---- .../hbase/rest/MultiRowResultReader.java | 85 +++++++++++++++++++ 2 files changed, 104 insertions(+), 17 deletions(-) create mode 100644 hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 4c2f6c3216bd..99fc0c845e6f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -18,10 +18,13 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; +import java.util.ArrayList; import java.util.Base64; import java.util.Base64.Decoder; +import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.rest.model.CellModel; @@ -98,8 +101,7 @@ public Response get(final @Context UriInfo uriInfo, ParseFilter pf = new ParseFilter(); parsedParamFilter = pf.parseFilterString(filterBytes); } - CellSetModel model = new CellSetModel(); - // TODO map this to a Table.get(List gets) call instead of multiple get calls + List rowSpecs = new ArrayList<>(); for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { RowSpec rowSpec = new RowSpec(rk, keyEncoding); @@ -112,24 +114,24 @@ public Response get(final @Context UriInfo uriInfo, rowSpec.addColumn(Bytes.toBytes(this.columns[i])); } } + rowSpecs.add(rowSpec); + } - ResultGenerator generator = ResultGenerator.fromRowSpec(this.tableResource.getName(), - rowSpec, parsedParamFilter, !params.containsKey(NOCACHE_PARAM_NAME)); - Cell value = null; - RowModel rowModel = new RowModel(rowSpec.getRow()); - if (generator.hasNext()) { - while ((value = generator.next()) != null) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); - } - model.addRow(rowModel); - } else { - if (LOG.isTraceEnabled()) { - LOG.trace("The row : " + rk + " not found in the table."); - } + MultiRowResultReader reader = new MultiRowResultReader(this.tableResource.getName(), rowSpecs, + parsedParamFilter, !params.containsKey(NOCACHE_PARAM_NAME)); + + CellSetModel model = new CellSetModel(); + for (Result r : reader.getResults()) { + if (r.isEmpty()) { + continue; } + RowModel rowModel = 
new RowModel(r.getRow()); + for (Cell c : r.listCells()) { + rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), + c.getTimestamp(), CellUtil.cloneValue(c))); + } + model.addRow(rowModel); } - if (model.getRows().isEmpty()) { // If no rows found. servlet.getMetrics().incrementFailedGetRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java new file mode 100644 index 000000000000..2903c37edf92 --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class MultiRowResultReader { + + private static final Logger LOG = LoggerFactory.getLogger(MultiRowResultReader.class); + + private Result[] results; + + public MultiRowResultReader(final String tableName, final Collection rowspecs, + final Filter filter, final boolean cacheBlocks) throws IOException { + try (Table table = RESTServlet.getInstance().getTable(tableName)) { + List gets = new ArrayList<>(rowspecs.size()); + for (RowSpec rowspec : rowspecs) { + Get get = new Get(rowspec.getRow()); + if (rowspec.hasColumns()) { + for (byte[] col : rowspec.getColumns()) { + byte[][] split = CellUtil.parseColumn(col); + if (split.length == 1) { + get.addFamily(split[0]); + } else if (split.length == 2) { + get.addColumn(split[0], split[1]); + } else { + throw new IllegalArgumentException("Invalid column specifier."); + } + } + } + get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + get.readVersions(rowspec.getMaxVersions()); + if (filter != null) { + get.setFilter(filter); + } + get.setCacheBlocks(cacheBlocks); + gets.add(get); + } + results = table.get(gets); + } catch (DoNotRetryIOException e) { + // TODO this is copied from RowResultGenerator, but we probably shouldn't swallow + // every type of exception but AccessDeniedException + LOG.warn(StringUtils.stringifyException(e)); + // Lets get the exception rethrown to get a more meaningful 
error message than 404 + if (e instanceof AccessDeniedException) { + throw e; + } + } + } + + public Result[] getResults() { + return results; + } + +} From 8a2f3ef79304f81ce1e81cf4635a7d3462c772e2 Mon Sep 17 00:00:00 2001 From: droudnitsky <168442446+droudnitsky@users.noreply.github.com> Date: Tue, 30 Apr 2024 09:55:40 -0400 Subject: [PATCH 344/514] HBASE-28533 On split procedure rollback revert parent region state back to OPEN (#5863) Co-authored-by: Daniel Roudnitsky Signed-off-by: Duo Zhang --- .../assignment/SplitTableRegionProcedure.java | 15 ++++++++++++++- .../assignment/TestSplitTableRegionProcedure.java | 6 ++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 2e2182b25d29..afa0f5e42b07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -392,7 +392,8 @@ protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegio postRollBackSplitRegion(env); break; case SPLIT_TABLE_REGION_PREPARE: - break; // nothing to do + rollbackPrepareSplit(env); + break; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); } @@ -572,6 +573,18 @@ public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOExcepti return true; } + /** + * Rollback prepare split region + * @param env MasterProcedureEnv + */ + private void rollbackPrepareSplit(final MasterProcedureEnv env) { + RegionStateNode parentRegionStateNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); + if (parentRegionStateNode.getState() == State.SPLITTING) { + parentRegionStateNode.setState(State.OPEN); + } + } + /** * Action before splitting region in a table. 
* @param env MasterProcedureEnv diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java index 6ec36e75bea2..99092712ec72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility; @@ -242,6 +243,11 @@ public void testRollbackForSplitTableRegionWithReplica() throws Exception { // There should not be any active OpenRegionProcedure procExec.getActiveProceduresNoCopy() .forEach(p -> assertFalse(p instanceof OpenRegionProcedure)); + + // Check that procedure rollback reverted parent region state to OPEN + AssignmentManager am = UTIL.getHBaseCluster().getMaster().getAssignmentManager(); + RegionStateNode regionStateNode = am.getRegionStates().getRegionStateNode(regions[0]); + assertEquals(RegionState.State.OPEN, regionStateNode.getState()); } @Test From e9ced397269407ac4963457835b010eab9d6a2d3 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 1 May 2024 20:41:42 +0800 Subject: [PATCH 345/514] HBASE-28558 Fix constructors for sub classes of Connection (#5861) Signed-off-by: Guanghao Zhang Signed-off-by: GeorryHuang --- .../hbase/client/ConnectionFactory.java | 49 +++++++++++++++---- .../mapreduce/TestHFileOutputFormat2.java | 11 +++-- .../TestMultiTableInputFormatBase.java | 3 +- .../mapreduce/TestTableInputFormatBase.java | 3 +- 4 files changed, 51 insertions(+), 15 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index f4ef4496dfcf..b9b156bf36d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -36,6 +36,10 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; /** * A non-instantiable class that manages creation of {@link Connection}s. Managing the lifecycle of @@ -74,6 +78,8 @@ @InterfaceAudience.Public public class ConnectionFactory { + private static final Logger LOG = LoggerFactory.getLogger(ConnectionFactory.class); + public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl"; @@ -386,16 +392,39 @@ public static Connection createConnection(URI connectionUri, Configuration conf, Class clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionOverAsyncConnection.class, Connection.class); if (clazz != ConnectionOverAsyncConnection.class) { - try { - // Default HCM#HCI is not accessible; make it so before invoking. 
- Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, - ExecutorService.class, User.class, Map.class); - constructor.setAccessible(true); - return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor - .newInstance(conf, pool, user, connectionAttributes)); - } catch (Exception e) { - throw new IOException(e); - } + return TraceUtil.trace(() -> { + try { + // Default HCM#HCI is not accessible; make it so before invoking. + Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, + ExecutorService.class, User.class, ConnectionRegistry.class, Map.class); + constructor.setAccessible(true); + ConnectionRegistry registry = connectionUri != null + ? ConnectionRegistryFactory.create(connectionUri, conf, user) + : ConnectionRegistryFactory.create(conf, user); + return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor + .newInstance(conf, pool, user, registry, connectionAttributes)); + } catch (NoSuchMethodException e) { + LOG.debug("Constructor with connection registry not found for class {}," + + " fallback to use old constructor", clazz.getName(), e); + } catch (Exception e) { + Throwables.throwIfInstanceOf(e, IOException.class); + Throwables.throwIfUnchecked(e); + throw new IOException(e); + } + + try { + // Default HCM#HCI is not accessible; make it so before invoking. + Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, + ExecutorService.class, User.class, Map.class); + constructor.setAccessible(true); + return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor + .newInstance(conf, pool, user, connectionAttributes)); + } catch (Exception e) { + Throwables.throwIfInstanceOf(e, IOException.class); + Throwables.throwIfUnchecked(e); + throw new IOException(e); + } + }, () -> TraceUtil.createSpan(ConnectionFactory.class.getSimpleName() + ".createConnection")); } else { return FutureUtils.get(createAsyncConnection(connectionUri, conf, user, connectionAttributes)) .toConnection(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index fc7f66129d35..e67ee3dbb736 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.apache.hadoop.hbase.client.ConnectionFactory.createAsyncConnection; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_TYPE_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistry; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Hbck; import org.apache.hadoop.hbase.client.Put; @@ -1668,9 +1668,14 @@ private static class ConfigurationCaptorConnection implements Connection { private final Connection delegate; public ConfigurationCaptorConnection(Configuration conf, ExecutorService es, User user, - Map connectionAttributes) throws IOException { + ConnectionRegistry registry, Map connectionAttributes) throws IOException { + // here we do not use 
this registry, so close it... + registry.close(); + // here we use createAsyncConnection, to avoid infinite recursive as we reset the Connection + // implementation in below method delegate = - FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes)).toConnection(); + FutureUtils.get(ConnectionFactory.createAsyncConnection(conf, user, connectionAttributes)) + .toConnection(); final String uuid = conf.get(UUID_KEY); if (uuid != null) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 0c879bd5ace3..7c136fa2a19f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionRegistry; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; @@ -124,7 +125,7 @@ public static class MRSplitsConnection implements Connection { static final AtomicInteger creations = new AtomicInteger(0); MRSplitsConnection(Configuration conf, ExecutorService pool, User user, - Map connectionAttributes) throws IOException { + ConnectionRegistry registry, Map connectionAttributes) throws IOException { this.configuration = conf; creations.incrementAndGet(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index f41282b8f4f8..7b2170d19520 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionRegistry; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -213,7 +214,7 @@ private static class ConnectionForMergeTesting implements Connection { } ConnectionForMergeTesting(Configuration conf, ExecutorService pool, User user, - Map connectionAttributes) throws IOException { + ConnectionRegistry registry, Map connectionAttributes) throws IOException { } @Override From 3d66866f416fc9a31de1c2d87d2194c054c2b07b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 1 May 2024 21:30:26 +0800 Subject: [PATCH 346/514] HBASE-28521 Use standard ConnectionRegistry and Client API to get region server list in in replication (#5825) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Guanghao Zhang Reviewed-by: Andor Molnár --- .../replication/HBaseReplicationEndpoint.java | 183 ++++++------------ .../HBaseInterClusterReplicationEndpoint.java | 11 +- .../TestHBaseReplicationEndpoint.java | 6 +- .../replication/TestReplicationBase.java | 10 + 
...ionStatusBothNormalAndRecoveryLagging.java | 6 +- ...StatusSourceStartedTargetStoppedNewOp.java | 5 +- ...StatusSourceStartedTargetStoppedNoOps.java | 5 +- ...ourceStartedTargetStoppedWithRecovery.java | 5 +- 8 files changed, 91 insertions(+), 140 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 564f43324ccd..f0ea993a41ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -19,28 +19,25 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.ReservoirSample; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.KeeperException.AuthFailedException; -import org.apache.zookeeper.KeeperException.ConnectionLossException; -import org.apache.zookeeper.KeeperException.SessionExpiredException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,12 +53,11 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class); - private ZKWatcher zkw = null; - private final Object zkwLock = new Object(); - protected Configuration conf; - private AsyncClusterConnection conn; + private final Object connLock = new Object(); + + private volatile AsyncClusterConnection conn; /** * Default maximum number of times a replication sink can be reported as bad before it will no @@ -106,36 +102,15 @@ public void init(Context context) throws IOException { this.badReportCounts = Maps.newHashMap(); } - protected void disconnect() { - synchronized (zkwLock) { - if (zkw != null) { - zkw.close(); - } - } - if (this.conn != null) { - try { - this.conn.close(); - this.conn = null; - } catch (IOException e) { - LOG.warn("{} Failed to close the connection", ctx.getPeerId()); - } - } - } - - /** - * A private method used to re-establish a zookeeper session with a peer cluster. 
- */ - private void reconnect(KeeperException ke) { - if ( - ke instanceof ConnectionLossException || ke instanceof SessionExpiredException - || ke instanceof AuthFailedException - ) { - String clusterKey = ctx.getPeerConfig().getClusterKey(); - LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); - try { - reloadZkWatcher(); - } catch (IOException io) { - LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io); + private void disconnect() { + synchronized (connLock) { + if (this.conn != null) { + try { + this.conn.close(); + this.conn = null; + } catch (IOException e) { + LOG.warn("{} Failed to close the connection", ctx.getPeerId()); + } } } } @@ -152,13 +127,7 @@ public void stop() { @Override protected void doStart() { - try { - reloadZkWatcher(); - connectPeerCluster(); - notifyStarted(); - } catch (IOException e) { - notifyFailed(e); - } + notifyStarted(); } @Override @@ -168,44 +137,40 @@ protected void doStop() { } @Override - // Synchronize peer cluster connection attempts to avoid races and rate - // limit connections when multiple replication sources try to connect to - // the peer cluster. If the peer cluster is down we can get out of control - // over time. public UUID getPeerUUID() { - UUID peerUUID = null; try { - synchronized (zkwLock) { - peerUUID = ZKClusterId.getUUIDForCluster(zkw); - } - } catch (KeeperException ke) { - reconnect(ke); + AsyncClusterConnection conn = connect(); + String clusterId = FutureUtils + .get(conn.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.CLUSTER_ID))) + .getClusterId(); + return UUID.fromString(clusterId); + } catch (IOException e) { + LOG.warn("Failed to get cluster id for cluster", e); + return null; } - return peerUUID; } - /** - * Closes the current ZKW (if not null) and creates a new one - * @throws IOException If anything goes wrong connecting - */ - private void reloadZkWatcher() throws IOException { - synchronized (zkwLock) { - if (zkw != null) { - zkw.close(); - } - zkw = - new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); - zkw.registerListener(new PeerRegionServerListener(this)); + // do not call this method in doStart method, only initialize the connection to remote cluster + // when you actually wants to make use of it. 
The problem here is that, starting the replication + // endpoint is part of the region server initialization work, so if the peer cluster is fully + // down and we can not connect to it, we will cause the initialization to fail and crash the + // region server, as we need the cluster id while setting up the AsyncClusterConnection, which + // needs to at least connect to zookeeper or some other servers in the peer cluster based on + // different connection registry implementation + private AsyncClusterConnection connect() throws IOException { + AsyncClusterConnection c = this.conn; + if (c != null) { + return c; } - } - - private void connectPeerCluster() throws IOException { - try { - conn = createConnection(this.conf); - } catch (IOException ioe) { - LOG.warn("{} Failed to create connection for peer cluster", ctx.getPeerId(), ioe); - throw ioe; + synchronized (connLock) { + c = this.conn; + if (c != null) { + return c; + } + c = createConnection(this.conf); + conn = c; } + return c; } @Override @@ -224,36 +189,27 @@ public boolean isAborted() { * Get the list of all the region servers from the specified peer * @return list of region server addresses or an empty list if the slave is unavailable */ - protected List fetchSlavesAddresses() { - List children = null; + // will be overrided in tests so protected + protected Collection fetchPeerAddresses() { try { - synchronized (zkwLock) { - children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); - } - } catch (KeeperException ke) { - if (LOG.isDebugEnabled()) { - LOG.debug("Fetch slaves addresses failed", ke); - } - reconnect(ke); - } - if (children == null) { + return FutureUtils.get(connect().getAdmin().getRegionServers(true)); + } catch (IOException e) { + LOG.debug("Fetch peer addresses failed", e); return Collections.emptyList(); } - List addresses = new ArrayList<>(children.size()); - for (String child : children) { - addresses.add(ServerName.parseServerName(child)); - } - return addresses; } protected synchronized void chooseSinks() { - List slaveAddresses = fetchSlavesAddresses(); + Collection slaveAddresses = fetchPeerAddresses(); if (slaveAddresses.isEmpty()) { LOG.warn("No sinks available at peer. Will not be able to replicate"); + this.sinkServers = Collections.emptyList(); + } else { + int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); + ReservoirSample sample = new ReservoirSample<>(numSinks); + sample.add(slaveAddresses.iterator()); + this.sinkServers = sample.getSamplingResult(); } - Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); - int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); - this.sinkServers = slaveAddresses.subList(0, numSinks); badReportCounts.clear(); } @@ -275,7 +231,7 @@ protected synchronized SinkPeer getReplicationSink() throws IOException { } ServerName serverName = sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); - return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); + return new SinkPeer(serverName, connect().getRegionServerAdmin(serverName)); } /** @@ -307,29 +263,6 @@ List getSinkServers() { return sinkServers; } - /** - * Tracks changes to the list of region servers in a peer's cluster. 
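// Editorial sketch (not part of the patch): the new chooseSinks() above replaces the
// shuffle-and-sublist approach with org.apache.hadoop.hbase.util.ReservoirSample, picking
// roughly ratio * N sink servers uniformly at random from an Iterator. The ReservoirSample
// constructor, add(Iterator) and getSamplingResult() calls below are exactly the ones used
// in the hunk above; the host names and ratio value are made up for illustration.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.ReservoirSample;

public class ReservoirSampleSketch {
  public static void main(String[] args) {
    List<ServerName> peers = Arrays.asList(
      ServerName.valueOf("rs1.example.com", 16020, 1L),
      ServerName.valueOf("rs2.example.com", 16020, 1L),
      ServerName.valueOf("rs3.example.com", 16020, 1L),
      ServerName.valueOf("rs4.example.com", 16020, 1L));
    float ratio = 0.5f;
    int numSinks = (int) Math.ceil(peers.size() * ratio);
    // Reservoir sampling keeps a uniform random subset of size numSinks in one pass,
    // without shuffling or copying the full peer list first.
    ReservoirSample<ServerName> sample = new ReservoirSample<>(numSinks);
    sample.add(peers.iterator());
    // Prints two of the four peers, chosen uniformly at random.
    System.out.println(sample.getSamplingResult());
  }
}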
- */ - public static class PeerRegionServerListener extends ZKListener { - - private final HBaseReplicationEndpoint replicationEndpoint; - private final String regionServerListNode; - - public PeerRegionServerListener(HBaseReplicationEndpoint endpoint) { - super(endpoint.zkw); - this.replicationEndpoint = endpoint; - this.regionServerListNode = endpoint.zkw.getZNodePaths().rsZNode; - } - - @Override - public synchronized void nodeChildrenChanged(String path) { - if (path.equals(regionServerListNode)) { - LOG.info("Detected change to peer region servers, fetching updated list"); - replicationEndpoint.chooseSinks(); - } - } - } - /** * Wraps a replication region server sink to provide the ability to identify it. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index d895920a51a8..6bdc97732644 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -106,7 +106,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private boolean isSerial = false; // Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; - private volatile boolean stopping = false; @Override public void init(Context context) throws IOException { @@ -449,7 +448,7 @@ public boolean replicate(ReplicateContext replicateContext) { } List> batches = createBatches(replicateContext.getEntries()); - while (this.isRunning() && !this.stopping) { + while (this.isRunning()) { if (!isPeerEnabled()) { if (sleepForRetries("Replication is disabled", sleepMultiplier)) { sleepMultiplier++; @@ -514,14 +513,6 @@ protected boolean isPeerEnabled() { return ctx.getReplicationPeer().isPeerEnabled(); } - @Override - protected void doStop() { - // Allow currently running replication tasks to finish - this.stopping = true; - disconnect(); // don't call super.doStop() - notifyStopped(); - } - protected CompletableFuture replicateEntries(List entries, int batchIndex, int timeout) { int entriesHashCode = System.identityHashCode(entries); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java index 7e1df9d415aa..95adc8a365cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java @@ -21,6 +21,7 @@ import static org.mockito.Mockito.mock; import java.io.IOException; +import java.util.Collection; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -166,6 +167,9 @@ public void testReportBadSinkDownToZeroSinks() { ServerName serverNameA = endpoint.getSinkServers().get(0); ServerName serverNameB = endpoint.getSinkServers().get(1); + serverNames.remove(serverNameA); + serverNames.remove(serverNameB); + SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class)); @@ -191,7 +195,7 @@ public void setRegionServers(List 
regionServers) { } @Override - public List fetchSlavesAddresses() { + protected Collection fetchPeerAddresses() { return regionServers; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 27477527277f..1429c3277371 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -25,12 +25,14 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -369,6 +371,14 @@ protected static void runSmallBatchTest() throws IOException, InterruptedExcepti waitForReplication(NB_ROWS_IN_BATCH, NB_RETRIES); } + protected static void stopAllRegionServers(HBaseTestingUtil util) throws IOException { + List rses = util.getMiniHBaseCluster().getRegionServerThreads().stream() + .map(t -> t.getRegionServer().getServerName()).collect(Collectors.toList()); + for (ServerName rs : rses) { + util.getMiniHBaseCluster().stopRegionServer(rs); + } + } + @AfterClass public static void tearDownAfterClass() throws Exception { if (htable2 != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java index 161e3c848f78..de19d0f5f4a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java @@ -44,7 +44,11 @@ public class TestReplicationStatusBothNormalAndRecoveryLagging extends TestRepli @Test public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception { - UTIL2.shutdownMiniHBaseCluster(); + // stop all region servers, we need to keep the master up as the below assertions need to get + // cluster id from remote cluster, if master is also down, we can not get any information from + // the remote cluster after source cluster restarts + stopAllRegionServers(UTIL2); + // add some values to cluster 1 for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { Put p = new Put(Bytes.toBytes("row" + i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java index 92688cb2575a..c9ef613a21f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java @@ -45,7 +45,10 @@ public class 
TestReplicationStatusSourceStartedTargetStoppedNewOp extends TestRe @Test public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Exception { - UTIL2.shutdownMiniHBaseCluster(); + // stop all region servers, we need to keep the master up as the below assertions need to get + // cluster id from remote cluster, if master is also down, we can not get any information from + // the remote cluster after source cluster restarts + stopAllRegionServers(UTIL2); restartSourceCluster(1); Admin hbaseAdmin = UTIL1.getAdmin(); // add some values to source cluster diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java index 018bfb98c6e5..b3e52e858a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java @@ -42,7 +42,10 @@ public class TestReplicationStatusSourceStartedTargetStoppedNoOps extends TestRe @Test public void testReplicationStatusSourceStartedTargetStoppedNoOps() throws Exception { - UTIL2.shutdownMiniHBaseCluster(); + // stop all region servers, we need to keep the master up as the below assertions need to get + // cluster id from remote cluster, if master is also down, we can not get any information from + // the remote cluster after source cluster restarts + stopAllRegionServers(UTIL2); restartSourceCluster(1); Admin hbaseAdmin = UTIL1.getAdmin(); ServerName serverName = UTIL1.getHBaseCluster().getRegionServer(0).getServerName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java index 3b097cff970f..269fa1b38c70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java @@ -46,7 +46,10 @@ public class TestReplicationStatusSourceStartedTargetStoppedWithRecovery @Test public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws Exception { - UTIL2.shutdownMiniHBaseCluster(); + // stop all region servers, we need to keep the master up as the below assertions need to get + // cluster id from remote cluster, if master is also down, we can not get any information from + // the remote cluster after source cluster restarts + stopAllRegionServers(UTIL2); // add some values to cluster 1 for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { Put p = new Put(Bytes.toBytes("row" + i)); From 339d7adfcf8297809fc77eec335a4041a659576f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 6 May 2024 15:08:45 +0800 Subject: [PATCH 347/514] HBASE-28479 Change the deprecation cycle for HasMasterServices and HasRegionServerServices (#5872) Signed-off-by: Yi Mei --- .../apache/hadoop/hbase/coprocessor/HasMasterServices.java | 5 +++-- .../hadoop/hbase/coprocessor/HasRegionServerServices.java | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java index dd8babf21dd3..6b672178e2ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java @@ -24,8 +24,9 @@ * Mark a class that it has a MasterServices accessor. Temporary hack until core Coprocesssors are * integrated. * @see CoreCoprocessor - * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 we will not need this + * facility as CoreCoprocessors are integated into core but we failed, so delay the + * removal to 4.0.0. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java index 21301efeafa7..faf9c7e42acf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java @@ -24,8 +24,9 @@ * Mark a class that it has a RegionServiceServices accessor. Temporary hack until core * Coprocesssors are integrated. * @see CoreCoprocessor - * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 we will not need this + * facility as CoreCoprocessors are integated into core but we failed, so delay the + * removal to 4.0.0. */ @Deprecated @InterfaceAudience.Private From 708882c6512a7b3f863ec17731bb0eba004039d1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 6 May 2024 15:11:48 +0800 Subject: [PATCH 348/514] HBASE-28480 Remove deprecated methods in RegionCoprocessorHost for 3.0.0 (#5873) Signed-off-by: Yi Mei --- .../hbase/coprocessor/RegionObserver.java | 19 +------- .../hadoop/hbase/regionserver/HRegion.java | 13 ----- .../hbase/regionserver/RSRpcServices.java | 20 -------- .../regionserver/RegionCoprocessorHost.java | 38 ++------------- .../SampleRegionWALCoprocessor.java | 48 ++++++++----------- .../coprocessor/SimpleRegionObserver.java | 36 -------------- .../TestRegionObserverInterface.java | 45 ++--------------- .../hbase/coprocessor/TestWALObserver.java | 5 +- 8 files changed, 30 insertions(+), 194 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 018826644acb..21cabcec1f8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -502,8 +502,9 @@ default void preDelete(ObserverContext c, Delete d * @param byteNow - timestamp bytes * @param get - the get formed using the current cell's row. Note that the get does not * specify the family and qualifier - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with + * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-4.0.0 and replaced with * something that doesn't expose IntefaceAudience.Private classes. 
+ * VisibilityController still needs this, need to change the logic there first. */ @Deprecated default void prePrepareTimeStampForDeleteVersion(ObserverContext c, @@ -1403,22 +1404,6 @@ default void postReplayWALs(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - } - - /** - * Called after a {@link WALEdit} replayed for this region. - * @param ctx the environment provided by the region server - */ - default void postWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - } - /** * Called before bulkLoadHFile. Users can create a StoreFile instance to access the contents of a * HFile. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ae4045b1216b..c55090d3a756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5656,15 +5656,6 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId; - // Start coprocessor replay here. The coprocessor is for each WALEdit - // instead of a KeyValue. - if (coprocessorHost != null) { - status.setStatus("Running pre-WAL-restore hook in coprocessors"); - if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) { - // if bypass this wal entry, ignore it ... - continue; - } - } boolean checkRowWithinBoundary = false; // Check this edit is for this region. if ( @@ -5733,10 +5724,6 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY); } - - if (coprocessorHost != null) { - coprocessorHost.postWALRestore(this.getRegionInfo(), key, val); - } } if (coprocessorHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a2b9a93263d5..babaa56170ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2089,7 +2089,6 @@ public ReplicateWALEntryResponse replay(final RpcController controller, ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? region.getCoprocessorHost() : null; // do not invoke coprocessors if this is a secondary region replica - List> walEntries = new ArrayList<>(); // Skip adding the edits to WAL if this is a secondary region replica boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); @@ -2111,18 +2110,6 @@ public ReplicateWALEntryResponse replay(final RpcController controller, Pair walEntry = (coprocessorHost == null) ? null : new Pair<>(); List edits = WALSplitUtil.getMutationsFromWALEntry(entry, cells, walEntry, durability); - if (coprocessorHost != null) { - // Start coprocessor replay here. The coprocessor is for each WALEdit instead of a - // KeyValue. - if ( - coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(), - walEntry.getSecond()) - ) { - // if bypass this log entry, ignore it ... 
- continue; - } - walEntries.add(walEntry); - } if (edits != null && !edits.isEmpty()) { // HBASE-17924 // sort to improve lock efficiency @@ -2145,13 +2132,6 @@ public ReplicateWALEntryResponse replay(final RpcController controller, if (wal != null) { wal.sync(); } - - if (coprocessorHost != null) { - for (Pair entry : walEntries) { - coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), - entry.getSecond()); - } - } return ReplicateWALEntryResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index ef84ca31f1d5..398c596b63f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -909,8 +909,9 @@ public void call(RegionObserver observer) throws IOException { * @param get - the get that could be used Note that the get only does not specify the family * and qualifier that should be used * @return true if default processing should be bypassed - * @deprecated In hbase-2.0.0. Will be removed in hbase-3.0.0. Added explicitly for a single - * Coprocessor for its needs only. Will be removed. + * @deprecated In hbase-2.0.0. Will be removed in hbase-4.0.0. Added explicitly for a single + * Coprocessor for its needs only. Will be removed. VisibilityController still needs + * this, need to change the logic there first. */ @Deprecated public boolean prePrepareTimeStampForDeleteVersion(final Mutation mutation, final Cell kv, @@ -1386,39 +1387,6 @@ public void call(RegionObserver observer) throws IOException { }); } - /** - * Supports Coprocessor 'bypass'. - * @return true if default behavior should be bypassed, false otherwise - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with - * something that doesn't expose IntefaceAudience.Private classes. - */ - @Deprecated - public boolean preWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { - return execOperation( - coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) { - @Override - public void call(RegionObserver observer) throws IOException { - observer.preWALRestore(this, info, logKey, logEdit); - } - }); - } - - /** - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with - * something that doesn't expose IntefaceAudience.Private classes. - */ - @Deprecated - public void postWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) - throws IOException { - execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperationWithoutResult() { - @Override - public void call(RegionObserver observer) throws IOException { - observer.postWALRestore(this, info, logKey, logEdit); - } - }); - } - /** * @param familyPaths pairs of { CF, file path } submitted for bulk load */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java index 65c42a8250d8..17ab26c6a58d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java @@ -53,10 +53,10 @@ public class SampleRegionWALCoprocessor private boolean preWALWriteCalled = false; private boolean postWALWriteCalled = false; - private boolean preWALRestoreCalled = false; - private boolean postWALRestoreCalled = false; private boolean preWALRollCalled = false; private boolean postWALRollCalled = false; + private boolean preReplayWALsCalled = false; + private boolean postReplayWALsCalled = false; /** * Set values: with a table name, a column name which will be ignored, and a column name which @@ -74,8 +74,6 @@ public void setTestValues(byte[] tableName, byte[] row, byte[] igf, byte[] igq, this.changedQualifier = chq; preWALWriteCalled = false; postWALWriteCalled = false; - preWALRestoreCalled = false; - postWALRestoreCalled = false; preWALRollCalled = false; postWALRollCalled = false; } @@ -132,15 +130,6 @@ public void preWALWrite(ObserverContext env } } - /** - * Triggered before {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is Restoreed. - */ - @Override - public void preWALRestore(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - preWALRestoreCalled = true; - } - @Override public void preWALRoll(ObserverContext ctx, Path oldPath, Path newPath) throws IOException { @@ -153,13 +142,16 @@ public void postWALRoll(ObserverContext ctx postWALRollCalled = true; } - /** - * Triggered after {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is Restoreed. 
- */ @Override - public void postWALRestore(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - postWALRestoreCalled = true; + public void preReplayWALs(ObserverContext ctx, + RegionInfo info, Path edits) throws IOException { + preReplayWALsCalled = true; + } + + @Override + public void postReplayWALs(ObserverContext ctx, + RegionInfo info, Path edits) throws IOException { + postReplayWALsCalled = true; } public boolean isPreWALWriteCalled() { @@ -170,16 +162,6 @@ public boolean isPostWALWriteCalled() { return postWALWriteCalled; } - public boolean isPreWALRestoreCalled() { - LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPreWALRestoreCalled is called."); - return preWALRestoreCalled; - } - - public boolean isPostWALRestoreCalled() { - LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPostWALRestoreCalled is called."); - return postWALRestoreCalled; - } - public boolean isPreWALRollCalled() { return preWALRollCalled; } @@ -187,4 +169,12 @@ public boolean isPreWALRollCalled() { public boolean isPostWALRollCalled() { return postWALRollCalled; } + + public boolean isPreReplayWALsCalled() { + return preReplayWALsCalled; + } + + public boolean isPostReplayWALsCalled() { + return postReplayWALsCalled; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index be54f320ef7a..62f65d6d6147 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -128,8 +128,6 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { final AtomicInteger ctPostBatchMutate = new AtomicInteger(0); final AtomicInteger ctPreReplayWALs = new AtomicInteger(0); final AtomicInteger ctPostReplayWALs = new AtomicInteger(0); - final AtomicInteger ctPreWALRestore = new AtomicInteger(0); - final AtomicInteger ctPostWALRestore = new AtomicInteger(0); final AtomicInteger ctPreStoreFileReaderOpen = new AtomicInteger(0); final AtomicInteger ctPostStoreFileReaderOpen = new AtomicInteger(0); final AtomicInteger ctPostBatchMutateIndispensably = new AtomicInteger(0); @@ -683,24 +681,6 @@ public void postReplayWALs(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - String tableName = logKey.getTableName().getNameAsString(); - if (tableName.equals(TABLE_SKIPPED)) { - // skip recovery of TABLE_SKIPPED for testing purpose - env.bypass(); - return; - } - ctPreWALRestore.incrementAndGet(); - } - - @Override - public void postWALRestore(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { - ctPostWALRestore.incrementAndGet(); - } - @Override public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, @@ -915,14 +895,6 @@ public boolean hadPostReplayWALs() { return ctPostReplayWALs.get() > 0; } - public boolean hadPreWALRestore() { - return ctPreWALRestore.get() > 0; - } - - public boolean hadPostWALRestore() { - return ctPostWALRestore.get() > 0; - } - public boolean wasScannerNextCalled() { return ctPreScannerNext.get() > 0 && ctPostScannerNext.get() > 0; } @@ -1035,14 +1007,6 @@ public int getCtPostReplayWALs() { return ctPostReplayWALs.get(); } - public int getCtPreWALRestore() { - return 
ctPreWALRestore.get(); - } - - public int getCtPostWALRestore() { - return ctPostWALRestore.get(); - } - public int getCtPreWALAppend() { return ctPreWALAppend.get(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 36e3ef1e0ff3..3787acbbf252 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -767,9 +767,8 @@ public void testRecovery() throws Exception { tableName, new Boolean[] { false, false, true, true, true, true, false }); verifyMethodResult(SimpleRegionObserver.class, - new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", - "getCtPostWALRestore", "getCtPrePut", "getCtPostPut" }, - tableName, new Integer[] { 0, 0, 0, 0, 2, 2 }); + new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPrePut", "getCtPostPut" }, + tableName, new Integer[] { 0, 0, 2, 2 }); cluster.killRegionServer(rs1.getRegionServer().getServerName()); Threads.sleep(1000); // Let the kill soak in. @@ -777,50 +776,14 @@ public void testRecovery() throws Exception { LOG.info("All regions assigned"); verifyMethodResult(SimpleRegionObserver.class, - new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", - "getCtPostWALRestore", "getCtPrePut", "getCtPostPut" }, - tableName, new Integer[] { 1, 1, 2, 2, 0, 0 }); + new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPrePut", "getCtPostPut" }, + tableName, new Integer[] { 1, 1, 0, 0 }); } finally { util.deleteTable(tableName); table.close(); } } - @Test - public void testPreWALRestoreSkip() throws Exception { - LOG.info(TestRegionObserverInterface.class.getName() + "." + name.getMethodName()); - TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED); - Table table = util.createTable(tableName, new byte[][] { A, B, C }); - - try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) { - JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer(); - ServerName sn2 = rs1.getRegionServer().getServerName(); - String regEN = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - - util.getAdmin().move(Bytes.toBytes(regEN), sn2); - while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) { - Thread.sleep(100); - } - - Put put = new Put(ROW); - put.addColumn(A, A, A); - put.addColumn(B, B, B); - put.addColumn(C, C, C); - table.put(put); - - cluster.killRegionServer(rs1.getRegionServer().getServerName()); - Threads.sleep(20000); // just to be sure that the kill has fully started. 
- util.waitUntilAllRegionsAssigned(tableName); - } - - verifyMethodResult(SimpleRegionObserver.class, - new String[] { "getCtPreWALRestore", "getCtPostWALRestore", }, tableName, - new Integer[] { 0, 0 }); - - util.deleteTable(tableName); - table.close(); - } - // called from testPreWALAppendIsWrittenToWAL private void testPreWALAppendHook(Table table, TableName tableName) throws IOException { int expectedCalls = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index be80d92bf570..105c57b55ea0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -355,10 +355,9 @@ public Void run() throws Exception { SampleRegionWALCoprocessor cp2 = region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class); - // TODO: asserting here is problematic. assertNotNull(cp2); - assertTrue(cp2.isPreWALRestoreCalled()); - assertTrue(cp2.isPostWALRestoreCalled()); + assertTrue(cp2.isPreReplayWALsCalled()); + assertTrue(cp2.isPostReplayWALsCalled()); region.close(); wals2.close(); return null; From ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b Mon Sep 17 00:00:00 2001 From: Vincent Poon Date: Mon, 6 May 2024 00:16:19 -0700 Subject: [PATCH 349/514] HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index 3879cb7ba911..5af7de5678c0 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -460,6 +460,11 @@ public List getMetaReplicaNodes() throws KeeperException { public List getMetaReplicaNodesAndWatchChildren() throws KeeperException { List childrenOfBaseNode = ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode); + // Need to throw here instead of returning an empty list if the base znode hasn't been created + // Caller should retry in that case, versus thinking the base znode has a watcher set + if (childrenOfBaseNode == null) { + keeperException(new KeeperException.NoNodeException(znodePaths.baseZNode)); + } return filterMetaReplicaNodes(childrenOfBaseNode); } From 917f2f1ec0c0fa113cbcdc8ca1332c0e7f9b481b Mon Sep 17 00:00:00 2001 From: Sravishtta Kommineni <49591501+ksravista@users.noreply.github.com> Date: Mon, 6 May 2024 04:15:32 -0400 Subject: [PATCH 350/514] HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5851) Co-authored-by: Sravi Kommineni Signed-off-by: Duo Zhang Reviewed-by: Ray Mattingly --- .../org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index fcbcd2d9f59f..2e288f246808 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -86,8 +86,8 @@ import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter; import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -194,7 +194,7 @@ static RecordWriter createRecordWrit final TaskAttemptContext context, final OutputCommitter committer) throws IOException { // Get the path of the temporary output file - final Path outputDir = ((FileOutputCommitter) committer).getWorkPath(); + final Path outputDir = ((PathOutputCommitter) committer).getWorkPath(); final Configuration conf = context.getConfiguration(); final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); From bcd6205f9d00627b8015fcb014a5888419b85a13 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 6 May 2024 21:58:35 +0800 Subject: [PATCH 351/514] HBASE-28566 Remove ZKDataMigrator (#5875) Signed-off-by: Yi Mei --- .../hadoop/hbase/util/ZKDataMigrator.java | 120 ------------------ 1 file changed, 120 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java deleted file mode 100644 index 5f4ddb3821ea..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; - -/** - * Utlity method to migrate zookeeper data across HBase versions. - * @deprecated Since 2.0.0. 
To be removed in hbase-3.0.0. - */ -@Deprecated -@InterfaceAudience.Private -public class ZKDataMigrator { - private static final Logger LOG = LoggerFactory.getLogger(ZKDataMigrator.class); - - // Shutdown constructor. - private ZKDataMigrator() { - } - - /** - * Method for table states migration. Used when upgrading from pre-2.0 to 2.0 Reading state from - * zk, applying them to internal state and delete. Used by master to clean migration from zk based - * states to table descriptor based states. - * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. - */ - @Deprecated - public static Map queryForTableStates(ZKWatcher zkw) - throws KeeperException, InterruptedException { - Map rv = new HashMap<>(); - List children = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().tableZNode); - if (children == null) return rv; - for (String child : children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName); - TableState.State newState = TableState.State.ENABLED; - if (state != null) { - switch (state) { - case ENABLED: - newState = TableState.State.ENABLED; - break; - case DISABLED: - newState = TableState.State.DISABLED; - break; - case DISABLING: - newState = TableState.State.DISABLING; - break; - case ENABLING: - newState = TableState.State.ENABLING; - break; - default: - } - } - rv.put(tableName, newState); - } - return rv; - } - - /** - * Gets table state from ZK. - * @param zkw ZKWatcher instance to use - * @param tableName table we're checking - * @return Null or - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} - * found in znode. - * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. - */ - @Deprecated - private static ZooKeeperProtos.DeprecatedTableState.State getTableState(final ZKWatcher zkw, - final TableName tableName) throws KeeperException, InterruptedException { - String znode = - ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); - byte[] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); - return builder.getState(); - } catch (IOException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } -} From 156e430dc56211c0aea15d792e8733b1b0e3de5c Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 7 May 2024 07:36:21 +0200 Subject: [PATCH 352/514] HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870) Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/MultiRowResource.java | 11 +-- .../hbase/rest/ProtobufStreamingOutput.java | 17 +--- .../apache/hadoop/hbase/rest/RestUtil.java | 48 +++++++++++ .../apache/hadoop/hbase/rest/RowResource.java | 9 +- .../hbase/rest/ScannerInstanceResource.java | 23 +++-- .../hadoop/hbase/rest/TableScanResource.java | 12 +-- .../hadoop/hbase/rest/model/CellModel.java | 86 ++++++++++++++++--- .../hadoop/hbase/rest/model/CellSetModel.java | 18 +++- .../hadoop/hbase/rest/model/RowModel.java | 64 ++++++++++++-- 9 files changed, 215 insertions(+), 73 deletions(-) create mode 100644 
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 99fc0c845e6f..8cce772472a8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -22,14 +22,10 @@ import java.util.Base64; import java.util.Base64.Decoder; import java.util.List; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.ParseFilter; -import org.apache.hadoop.hbase.rest.model.CellModel; import org.apache.hadoop.hbase.rest.model.CellSetModel; -import org.apache.hadoop.hbase.rest.model.RowModel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -125,12 +121,7 @@ public Response get(final @Context UriInfo uriInfo, if (r.isEmpty()) { continue; } - RowModel rowModel = new RowModel(r.getRow()); - for (Cell c : r.listCells()) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), - c.getTimestamp(), CellUtil.cloneValue(c))); - } - model.addRow(rowModel); + model.addRow(RestUtil.createRowModelFromResult(r)); } if (model.getRows().isEmpty()) { // If no rows found. diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java index eadd6a9334bc..60c3d363ec32 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -19,14 +19,9 @@ import java.io.IOException; import java.io.OutputStream; -import java.util.List; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.rest.model.CellModel; import org.apache.hadoop.hbase.rest.model.CellSetModel; -import org.apache.hadoop.hbase.rest.model.RowModel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -91,15 +86,11 @@ private void writeToStream(CellSetModel model, String contentType, OutputStream private CellSetModel createModelFromResults(Result[] results) { CellSetModel cellSetModel = new CellSetModel(); - for (Result rs : results) { - byte[] rowKey = rs.getRow(); - RowModel rModel = new RowModel(rowKey); - List kvs = rs.listCells(); - for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), CellUtil.cloneValue(kv))); + for (int i = 0; i < results.length; i++) { + if (results[i].isEmpty()) { + continue; } - cellSetModel.addRow(rModel); + cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i])); } return cellSetModel; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java new file mode 100644 index 000000000000..5f884c510d6d --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public final class RestUtil { + + private RestUtil() { + // Do not instantiate + } + + /** + * Speed-optimized method to convert an HBase result to a RowModel. Avoids iterators and uses the + * non-cloning constructors to minimize overhead, especially when using protobuf marshalling. + * @param r non-empty Result object + */ + public static RowModel createRowModelFromResult(Result r) { + Cell firstCell = r.rawCells()[0]; + RowModel rowModel = + new RowModel(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength()); + int cellsLength = r.rawCells().length; + for (int i = 0; i < cellsLength; i++) { + rowModel.addCell(new CellModel(r.rawCells()[i])); + } + return rowModel; + } +} diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index 1f0c75ae4814..20c896ae82fb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -112,8 +112,7 @@ public Response get(final @Context UriInfo uriInfo) { rowKey = CellUtil.cloneRow(value); rowModel = new RowModel(rowKey); } - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(value)); if (++count > rowspec.getMaxValues()) { break; } @@ -711,8 +710,7 @@ Response append(final CellSetModel model) { CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { - rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + rRowModel.addCell(new CellModel(cell)); } rModel.addRow(rRowModel); servlet.getMetrics().incrementSucessfulAppendRequests(1); @@ -803,8 +801,7 @@ Response increment(final CellSetModel model) { CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { - rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + rRowModel.addCell(new CellModel(cell)); } rModel.addRow(rowModel); servlet.getMetrics().incrementSucessfulIncrementRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 81ab8e24692f..951cafc8632a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -83,7 +83,9 @@ public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows } CellSetModel model = new CellSetModel(); RowModel rowModel = null; - byte[] rowKey = null; + byte[] rowKeyArray = null; + int rowKeyOffset = 0; + int rowKeyLength = 0; int limit = batch; if (maxValues > 0) { limit = maxValues; @@ -121,11 +123,13 @@ public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows } break; } - if (rowKey == null) { - rowKey = CellUtil.cloneRow(value); - rowModel = new RowModel(rowKey); + if (rowKeyArray == null) { + rowKeyArray = value.getRowArray(); + rowKeyOffset = value.getRowOffset(); + rowKeyLength = value.getRowLength(); + rowModel = new RowModel(rowKeyArray, rowKeyOffset, rowKeyLength); } - if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) { + if (!CellUtil.matchingRow(value, rowKeyArray, rowKeyOffset, rowKeyLength)) { // if maxRows was given as a query param, stop if we would exceed the // specified number of rows if (maxRows > 0) { @@ -135,11 +139,12 @@ public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows } } model.addRow(rowModel); - rowKey = CellUtil.cloneRow(value); - rowModel = new RowModel(rowKey); + rowKeyArray = value.getRowArray(); + rowKeyOffset = value.getRowOffset(); + rowKeyLength = value.getRowLength(); + rowModel = new RowModel(rowKeyArray, rowKeyOffset, rowKeyLength); } - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(value)); } while (--count > 0); model.addRow(rowModel); ResponseBuilder response = Response.ok(model); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index e30beaa37df7..4bb30b0cc3c7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -22,16 +22,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; -import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.rest.model.CellModel; import org.apache.hadoop.hbase.rest.model.RowModel; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -87,13 +83,7 @@ public RowModel next() { if ((rs == null) || (count <= 0)) { return null; } - byte[] rowKey = rs.getRow(); - RowModel rModel = new RowModel(rowKey); - List kvs = rs.listCells(); - for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), CellUtil.cloneValue(kv))); - } + RowModel rModel = RestUtil.createRowModelFromResult(rs); count--; if (count == 0) { results.close(); diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index eda3267bf58b..4284727e4380 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.rest.model; +import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER; + +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; @@ -58,10 +61,11 @@ * */ @XmlRootElement(name = "Cell") -@XmlAccessorType(XmlAccessType.FIELD) +@XmlAccessorType(XmlAccessType.NONE) @InterfaceAudience.Private public class CellModel implements ProtobufMessageHandler, Serializable { private static final long serialVersionUID = 1L; + public static final int MAGIC_LENGTH = -1; @JsonProperty("column") @XmlAttribute @@ -71,10 +75,17 @@ public class CellModel implements ProtobufMessageHandler, Serializable { @XmlAttribute private long timestamp = HConstants.LATEST_TIMESTAMP; - @JsonProperty("$") - @XmlValue + // If valueLength = -1, this represents the cell's value. + // If valueLength <> 1, this represents an array containing the cell's value as determined by + // offset and length. private byte[] value; + @JsonIgnore + private int valueOffset; + + @JsonIgnore + private int valueLength = MAGIC_LENGTH; + /** * Default constructor */ @@ -96,11 +107,16 @@ public CellModel(byte[] column, byte[] qualifier, byte[] value) { } /** - * Constructor from KeyValue + * Constructor from KeyValue This avoids copying the value from the cell, and tries to optimize + * generating the column value. 
*/ public CellModel(org.apache.hadoop.hbase.Cell cell) { - this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), - CellUtil.cloneValue(cell)); + this.column = makeColumn(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); + this.timestamp = cell.getTimestamp(); + this.value = cell.getValueArray(); + this.valueOffset = cell.getValueOffset(); + this.valueLength = cell.getValueLength(); } /** @@ -109,16 +125,16 @@ public CellModel(org.apache.hadoop.hbase.Cell cell) { public CellModel(byte[] column, long timestamp, byte[] value) { this.column = column; this.timestamp = timestamp; - this.value = value; + setValue(value); } /** * Constructor */ - public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) { - this.column = CellUtil.makeColumn(column, qualifier); + public CellModel(byte[] family, byte[] qualifier, long timestamp, byte[] value) { + this.column = CellUtil.makeColumn(family, qualifier); this.timestamp = timestamp; - this.value = value; + setValue(value); } /** Returns the column */ @@ -151,22 +167,49 @@ public void setTimestamp(long timestamp) { } /** Returns the value */ + @JsonProperty("$") + @XmlValue public byte[] getValue() { + if (valueLength == MAGIC_LENGTH) { + return value; + } else { + byte[] retValue = new byte[valueLength]; + System.arraycopy(value, valueOffset, retValue, 0, valueLength); + return retValue; + } + } + + /** Returns the backing array for value (may be the same as value) */ + public byte[] getValueArray() { return value; } /** * @param value the value to set */ + @JsonProperty("$") public void setValue(byte[] value) { this.value = value; + this.valueLength = MAGIC_LENGTH; + } + + public int getValueOffset() { + return valueOffset; + } + + public int getValueLength() { + return valueLength; } @Override public byte[] createProtobufOutput() { Cell.Builder builder = Cell.newBuilder(); builder.setColumn(UnsafeByteOperations.unsafeWrap(getColumn())); - builder.setData(UnsafeByteOperations.unsafeWrap(getValue())); + if (valueLength == MAGIC_LENGTH) { + builder.setData(UnsafeByteOperations.unsafeWrap(getValue())); + } else { + builder.setData(UnsafeByteOperations.unsafeWrap(value, valueOffset, valueLength)); + } if (hasUserTimestamp()) { builder.setTimestamp(getTimestamp()); } @@ -185,6 +228,21 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce return this; } + /** + * Makes a column in family:qualifier form from separate byte arrays with offset and length. + *
+ * Not recommended for usage as this is old-style API. + * @return family:qualifier + */ + public static byte[] makeColumn(byte[] family, int familyOffset, int familyLength, + byte[] qualifier, int qualifierOffset, int qualifierLength) { + byte[] column = new byte[familyLength + qualifierLength + 1]; + System.arraycopy(family, familyOffset, column, 0, familyLength); + column[familyLength] = COLUMN_FAMILY_DELIMITER; + System.arraycopy(qualifier, qualifierOffset, column, familyLength + 1, qualifierLength); + return column; + } + @Override public boolean equals(Object obj) { if (obj == null) { @@ -198,17 +256,17 @@ public boolean equals(Object obj) { } CellModel cellModel = (CellModel) obj; return new EqualsBuilder().append(column, cellModel.column) - .append(timestamp, cellModel.timestamp).append(value, cellModel.value).isEquals(); + .append(timestamp, cellModel.timestamp).append(getValue(), cellModel.getValue()).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder().append(column).append(timestamp).append(value).toHashCode(); + return new HashCodeBuilder().append(column).append(timestamp).append(getValue()).toHashCode(); } @Override public String toString() { return new ToStringBuilder(this).append("column", column).append("timestamp", timestamp) - .append("value", value).toString(); + .append("value", getValue()).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index fff96b3486c2..8908ec7e6c88 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.rest.model; +import static org.apache.hadoop.hbase.rest.model.CellModel.MAGIC_LENGTH; + import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; @@ -69,7 +71,7 @@ * */ @XmlRootElement(name = "CellSet") -@XmlAccessorType(XmlAccessType.FIELD) +@XmlAccessorType(XmlAccessType.NONE) @InterfaceAudience.Private public class CellSetModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -110,11 +112,21 @@ public byte[] createProtobufOutput() { CellSet.Builder builder = CellSet.newBuilder(); for (RowModel row : getRows()) { CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder(); - rowBuilder.setKey(UnsafeByteOperations.unsafeWrap(row.getKey())); + if (row.getKeyLength() == MAGIC_LENGTH) { + rowBuilder.setKey(UnsafeByteOperations.unsafeWrap(row.getKey())); + } else { + rowBuilder.setKey(UnsafeByteOperations.unsafeWrap(row.getKeyArray(), row.getKeyOffset(), + row.getKeyLength())); + } for (CellModel cell : row.getCells()) { Cell.Builder cellBuilder = Cell.newBuilder(); cellBuilder.setColumn(UnsafeByteOperations.unsafeWrap(cell.getColumn())); - cellBuilder.setData(UnsafeByteOperations.unsafeWrap(cell.getValue())); + if (cell.getValueLength() == MAGIC_LENGTH) { + cellBuilder.setData(UnsafeByteOperations.unsafeWrap(cell.getValue())); + } else { + cellBuilder.setData(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } if (cell.hasUserTimestamp()) { cellBuilder.setTimestamp(cell.getTimestamp()); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index f3e892aba0c2..8b660ac362fc 100644 
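(Illustration only, not part of the patch.) Taken together, the hbase-rest model changes above defer copying: CellModel, and RowModel in the hunks that follow, keep a reference into the Cell's backing array plus offset/length, protobuf marshalling wraps those bytes directly, and a defensive copy is only made when getValue()/getKey() is called. A minimal sketch of the intended call pattern, assuming a non-empty client Result r and the org.apache.hadoop.hbase.rest classes on the classpath:

  // Wraps the Result's backing arrays; no family/qualifier/value cloning happens here.
  RowModel row = RestUtil.createRowModelFromResult(r);

  CellSetModel model = new CellSetModel();
  model.addRow(row);

  // Protobuf output reads values through getValueArray()/getValueOffset()/getValueLength(),
  // so the bytes are wrapped, not copied.
  byte[] pb = model.createProtobufOutput();

  // XML/JSON marshalling, as well as equals/hashCode/toString, still see a copy via getValue().
  byte[] copiedValue = row.getCells().get(0).getValue();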
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.rest.model; +import static org.apache.hadoop.hbase.rest.model.CellModel.MAGIC_LENGTH; + import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; @@ -49,15 +51,18 @@ * */ @XmlRootElement(name = "Row") -@XmlAccessorType(XmlAccessType.FIELD) +@XmlAccessorType(XmlAccessType.NONE) @InterfaceAudience.Private public class RowModel implements ProtobufMessageHandler, Serializable { private static final long serialVersionUID = 1L; - @JsonProperty("key") - @XmlAttribute + // If keyLength = -1, this represents the key + // If keyLength <> -1, this represents the base array, and key is determined by offset and length private byte[] key; + private int keyOffset = 0; + private int keyLength = MAGIC_LENGTH; + @JsonProperty("Cell") @XmlElement(name = "Cell") private List cells = new ArrayList<>(); @@ -81,7 +86,18 @@ public RowModel(final String key) { * @param key the row key */ public RowModel(final byte[] key) { + setKey(key); + cells = new ArrayList<>(); + } + + /** + * Constructor + * @param key the row key as represented in the Cell + */ + public RowModel(final byte[] key, int keyOffset, int keyLength) { this.key = key; + this.keyOffset = keyOffset; + this.keyLength = keyLength; cells = new ArrayList<>(); } @@ -100,7 +116,17 @@ public RowModel(final String key, final List cells) { * @param cells the cells */ public RowModel(final byte[] key, final List cells) { - this.key = key; + this(key); + this.cells = cells; + } + + /** + * Constructor + * @param key the row key + * @param cells the cells + */ + public RowModel(final byte[] key, int keyOffset, int keyLength, final List cells) { + this(key, keyOffset, keyLength); this.cells = cells; } @@ -113,15 +139,38 @@ public void addCell(CellModel cell) { } /** Returns the row key */ + @XmlAttribute + @JsonProperty("key") public byte[] getKey() { + if (keyLength == MAGIC_LENGTH) { + return key; + } else { + byte[] retKey = new byte[keyLength]; + System.arraycopy(key, keyOffset, retKey, 0, keyLength); + return retKey; + } + } + + /** Returns the backing row key array */ + public byte[] getKeyArray() { return key; } /** * @param key the row key */ + @JsonProperty("key") public void setKey(byte[] key) { this.key = key; + this.keyLength = MAGIC_LENGTH; + } + + public int getKeyOffset() { + return keyOffset; + } + + public int getKeyLength() { + return keyLength; } /** Returns the cells */ @@ -153,16 +202,17 @@ public boolean equals(Object obj) { return false; } RowModel rowModel = (RowModel) obj; - return new EqualsBuilder().append(key, rowModel.key).append(cells, rowModel.cells).isEquals(); + return new EqualsBuilder().append(getKey(), rowModel.getKey()).append(cells, rowModel.cells) + .isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder().append(key).append(cells).toHashCode(); + return new HashCodeBuilder().append(getKey()).append(cells).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this).append("key", key).append("cells", cells).toString(); + return new ToStringBuilder(this).append("key", getKey()).append("cells", cells).toString(); } } From c2ea9a1c8d1993e041daa69bd435a4762a6eb6aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 21:29:46 +0800 Subject: [PATCH 
353/514] HBASE-28574 Bump jinja2 from 3.1.3 to 3.1.4 in /dev-support/flaky-tests (#5879) Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/pallets/jinja/releases) - [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/jinja/compare/3.1.3...3.1.4) --- updated-dependencies: - dependency-name: jinja2 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 8ef087b0036d..5269993fb9ba 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -19,4 +19,4 @@ requests==2.31.0 future==0.18.3 gitpython==3.1.41 rbtools==4.0 -jinja2==3.1.3 +jinja2==3.1.4 From c4f01ede674bcc7098edc88032c5cb6edae02876 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 7 May 2024 21:46:36 +0800 Subject: [PATCH 354/514] HBASE-28571 Remove deprecated methods map reduce utils (#5878) Signed-off-by: Xin Sun --- .../mapreduce/MapReduceHFileSplitterJob.java | 2 +- ...rationTestBigLinkedListWithVisibility.java | 6 ++-- .../hbase/mapreduce/TableMapReduceUtil.java | 36 ------------------- 3 files changed, 5 insertions(+), 39 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index 755b0a41e32c..04180972885e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -122,7 +122,7 @@ public Job createSubmittableJob(String[] args) throws IOException { } LOG.debug("success configuring load incremental job"); - TableMapReduceUtil.addDependencyJars(job.getConfiguration(), + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } else { throw new IOException("No bulk output directory specified"); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index a25a1e05909a..38c91fcb37c3 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -277,7 +277,8 @@ public int runCopier(String outputDir) throws Exception { job.getConfiguration().setBoolean("mapreduce.reduce.speculative", false); TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job, null, null, null, null); TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + AbstractHBaseTool.class); TableMapReduceUtil.initCredentials(job); job.setNumReduceTasks(0); boolean success = job.waitForCompletion(true); @@ -430,7 +431,8 @@ private int doVerify(Path outputDir, int numReducers) 
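(Sketch, not part of the diff.) The call-site updates above, together with the removals in the TableMapReduceUtil hunks that follow, amount to a small migration for users of the deprecated overloads; roughly, assuming an existing Job job and a peer cluster key quorumAddress:

  // addDependencyJars(Configuration, Class...) is gone; use the class-based variant directly:
  TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), AbstractHBaseTool.class);

  // initCredentialsForCluster(Job, String) is gone; build the peer Configuration yourself,
  // which is what the removed method did internally:
  Configuration peerConf =
      HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress);
  TableMapReduceUtil.initCredentialsForCluster(job, peerConf);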
TableMapReduceUtil.initTableMapperJob(tableName.getName(), scan, VerifyMapper.class, BytesWritable.class, BytesWritable.class, job); - TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + AbstractHBaseTool.class); job.getConfiguration().setBoolean("mapreduce.map.speculative", false); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index dc3915501b2f..f189767a7c76 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -481,26 +481,6 @@ public static void initCredentials(Job job) throws IOException { } } - /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user and - * add it to the credentials for the given map reduce job. The quorumAddress is the key to the ZK - * ensemble, which contains: hbase.zookeeper.quorum, hbase.zookeeper.client.port and - * zookeeper.znode.parent - * @param job The job that requires the permission. - * @param quorumAddress string that contains the 3 required configuratins - * @throws IOException When the authentication token cannot be obtained. - * @deprecated Since 1.2.0 and will be removed in 3.0.0. Use - * {@link #initCredentialsForCluster(Job, Configuration)} instead. - * @see #initCredentialsForCluster(Job, Configuration) - * @see HBASE-14886 - */ - @Deprecated - public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { - Configuration peerConf = - HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress); - initCredentialsForCluster(job, peerConf); - } - /** * Obtain an authentication token, for the specified cluster, on behalf of the current user and * add it to the credentials for the given map reduce job. @@ -781,22 +761,6 @@ public static void addDependencyJars(Job job) throws IOException { } } - /** - * Add the jars containing the given classes to the job's configuration such that JobClient will - * ship them to the cluster and add them to the DistributedCache. - * @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} - * instead. - * @see #addDependencyJars(Job) - * @see HBASE-8386 - */ - @Deprecated - public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { - LOG.warn("The addDependencyJars(Configuration, Class...) method has been deprecated since it" - + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " - + "instead. See HBASE-8386 for more details."); - addDependencyJarsForClasses(conf, classes); - } - /** * Add the jars containing the given classes to the job's configuration such that JobClient will * ship them to the cluster and add them to the DistributedCache. N.B. 
that this method at most From f750de2b1129c4be5fea0fb8434797b1a681f3a7 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 8 May 2024 15:30:30 +0800 Subject: [PATCH 355/514] HBASE-28570 Remove deprecated fields in HBTU (#5877) Signed-off-by: Xin Sun --- .../apache/hadoop/hbase/HBaseTestingUtil.java | 27 +++++-------------- .../hadoop/hbase/HBaseTestingUtility.java | 27 +++++-------------- 2 files changed, 14 insertions(+), 40 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 79b041f3421a..dcdf55a945b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -183,16 +183,6 @@ @InterfaceStability.Evolving public class HBaseTestingUtil extends HBaseZKTestingUtil { - /** - * System property key to get test directory value. Name is as it is because mini dfs has - * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property - * used in mini dfs. - * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs. - * @see HBASE-19410 - */ - @Deprecated - private static final String TEST_DIRECTORY_KEY = "test.build.data"; - public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server"; /** * The default number of regions per regionserver when creating a pre-split table. @@ -384,13 +374,12 @@ public void setHBaseCluster(HBaseClusterInterface hbaseCluster) { /** * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can - * have many concurrent tests running if we need to. It needs to amend the - * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on. - * Moding a System property is not the way to do concurrent instances -- another instance could - * grab the temporary value unintentionally -- but not anything can do about it at moment; single - * instance only is how the minidfscluster works. We also create the underlying directory names - * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the - * conf, and as a system property for hadoop.tmp.dir (We do not create them!). + * have many concurrent tests running if we need to. Moding a System property is not the way to do + * concurrent instances -- another instance could grab the temporary value unintentionally -- but + * not anything can do about it at moment; single instance only is how the minidfscluster works. + * We also create the underlying directory names for hadoop.log.dir, mapreduce.cluster.local.dir + * and hadoop.tmp.dir, and set the values in the conf, and as a system property for hadoop.tmp.dir + * (We do not create them!). * @return The calculated data test build directory, if newly-created. 
*/ @Override @@ -677,8 +666,7 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE */ private void createDirsAndSetProperties() throws IOException { setupClusterTestDir(); - conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath()); - System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath()); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, clusterTestDir.getCanonicalPath()); createDirAndSetProperty("test.cache.data"); createDirAndSetProperty("hadoop.tmp.dir"); hadoopLogDir = createDirAndSetProperty("hadoop.log.dir"); @@ -832,7 +820,6 @@ public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption opti miniClusterRunning = true; setupClusterTestDir(); - System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath()); // Bring up mini dfs cluster. This spews a bunch of warnings about missing // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'. diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 5329408e4e69..8cdf2719db93 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -171,16 +171,6 @@ @Deprecated public class HBaseTestingUtility extends HBaseZKTestingUtility { - /** - * System property key to get test directory value. Name is as it is because mini dfs has - * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property - * used in mini dfs. - * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs. - * @see HBASE-19410 - */ - @Deprecated - private static final String TEST_DIRECTORY_KEY = "test.build.data"; - public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server"; /** * The default number of regions per regionserver when creating a pre-split table. @@ -372,13 +362,12 @@ public void setHBaseCluster(HBaseCluster hbaseCluster) { /** * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can - * have many concurrent tests running if we need to. It needs to amend the - * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on. - * Moding a System property is not the way to do concurrent instances -- another instance could - * grab the temporary value unintentionally -- but not anything can do about it at moment; single - * instance only is how the minidfscluster works. We also create the underlying directory names - * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the - * conf, and as a system property for hadoop.tmp.dir (We do not create them!). + * have many concurrent tests running if we need to. Moding a System property is not the way to do + * concurrent instances -- another instance could grab the temporary value unintentionally -- but + * not anything can do about it at moment; single instance only is how the minidfscluster works. + * We also create the underlying directory names for hadoop.log.dir, mapreduce.cluster.local.dir + * and hadoop.tmp.dir, and set the values in the conf, and as a system property for hadoop.tmp.dir + * (We do not create them!). * @return The calculated data test build directory, if newly-created. 
*/ @Override @@ -663,8 +652,7 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE */ private void createDirsAndSetProperties() throws IOException { setupClusterTestDir(); - conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath()); - System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath()); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, clusterTestDir.getCanonicalPath()); createDirAndSetProperty("test.cache.data"); createDirAndSetProperty("hadoop.tmp.dir"); hadoopLogDir = createDirAndSetProperty("hadoop.log.dir"); @@ -1066,7 +1054,6 @@ public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws E miniClusterRunning = true; setupClusterTestDir(); - System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath()); // Bring up mini dfs cluster. This spews a bunch of warnings about missing // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'. From 2a7aa0d439d8bb17be2cccc057f3d2ec615f7690 Mon Sep 17 00:00:00 2001 From: guluo Date: Wed, 8 May 2024 15:31:32 +0800 Subject: [PATCH 356/514] HBASE-28575 Always printing error log when snapshot table (#5880) Signed-off-by: Duo Zhang --- .../hadoop/hbase/master/snapshot/TakeSnapshotHandler.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index f746adf0b89e..b24f79494045 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -252,11 +252,11 @@ public void process() { try { // if the working dir is still present, the snapshot has failed. it is present we delete // it. 
- if (!workingDirFs.delete(workingDir, true)) { - LOG.error("Couldn't delete snapshot working directory:" + workingDir); + if (workingDirFs.exists(workingDir) && !workingDirFs.delete(workingDir, true)) { + LOG.error("Couldn't delete snapshot working directory: {}", workingDir); } } catch (IOException e) { - LOG.error("Couldn't delete snapshot working directory:" + workingDir); + LOG.error("Couldn't get or delete snapshot working directory: {}", workingDir, e); } if (LOG.isDebugEnabled()) { LOG.debug("Table snapshot journal : \n" + status.prettyPrintJournal()); From 23fa363d360202db73a0d1b517e29841a904026c Mon Sep 17 00:00:00 2001 From: Minwoo Kang Date: Wed, 8 May 2024 16:34:03 +0900 Subject: [PATCH 357/514] HBASE-28563 Closing ZooKeeper in ZKMainServer (#5869) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Duo Zhang Reviewed-by: Andor Molnár --- .../hadoop/hbase/zookeeper/ZKMainServer.java | 31 ++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java index 623085b5923e..ce849fea1a42 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.zookeeper; +import java.io.Closeable; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.ZooKeeperMain; import org.apache.zookeeper.cli.CliException; @@ -38,10 +40,11 @@ public String parse(final Configuration c) { } /** - * ZooKeeper 3.4.6 broke being able to pass commands on command line. See ZOOKEEPER-1897. This - * class is a hack to restore this faclity. + * ZooKeeper 3.4.6 broke being able to pass commands on command line. See ZOOKEEPER-1897, + * ZOOKEEPER-4804. This class is a hack to restore this faclity. */ - private static class HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain { + private static class HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain + implements Closeable { public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) throws IOException, InterruptedException { super(args); @@ -49,7 +52,11 @@ public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) // run the command without being connected, we get ConnectionLoss KeeperErrorConnection... // Make it 30seconds. We dont' have a config in this context and zk doesn't have // a timeout until after connection. 30000ms is default for zk. 
- ZooKeeperHelper.ensureConnectedZooKeeper(this.zk, 30000); + try { + ZooKeeperHelper.ensureConnectedZooKeeper(this.zk, 30000); + } catch (ZooKeeperConnectionException e) { + this.zk.close(); + } } /** @@ -62,6 +69,15 @@ void runCmdLine() throws IOException, InterruptedException, CliException { processCmd(this.cl); System.exit(0); } + + @Override + public void close() throws IOException { + try { + this.zk.close(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } } /** @@ -109,9 +125,10 @@ public static void main(String[] args) throws Exception { // ZOOKEEPER-1897 was committed to zookeeper-3.4.6 but elsewhere in this class we say // 3.4.6 breaks command-processing; TODO. if (hasCommandLineArguments(args)) { - HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain zkm = - new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs); - zkm.runCmdLine(); + try (HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain zkm = + new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs)) { + zkm.runCmdLine(); + } } else { ZooKeeperMain.main(newArgs); } From 328df6abf3e1dca21fe882f4de2329aa9fd40616 Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sun, 12 May 2024 21:50:38 +0800 Subject: [PATCH 358/514] HBASE-28576 Remove FirstKeyValueMatchingQualifiersFilter (#5891) Signed-off-by: Duo Zhang --- ...FirstKeyValueMatchingQualifiersFilter.java | 47 ------------------- .../TestPartialResultsFromClientSide.java | 6 --- 2 files changed, 53 deletions(-) delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java deleted file mode 100644 index 67354b304f5a..000000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.yetus.audience.InterfaceAudience; - -/** - * This filter was deprecated in 2.0.0 and should be removed in 3.0.0. We keep the code here to - * prevent the proto serialization exceptions puzzle those users who use older version clients to - * communicate with newer version servers. - * @deprecated Deprecated in 2.0.0 and will be removed in 3.0.0. 
- * @see HBASE-13347 - */ -@InterfaceAudience.Public -@Deprecated -public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { - - /** - * Parses a serialized representation of {@link FirstKeyValueMatchingQualifiersFilter} - * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance - * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from - * bytes - * @throws DeserializationException if an error occurred - * @see #toByteArray - */ - public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { - throw new DeserializationException( - "Stop using FirstKeyValueMatchingQualifiersFilter, which has been permanently removed"); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 1d50bfa37ba0..299f833c7c81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.filter.ColumnRangeFilter; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; import org.apache.hadoop.hbase.filter.RandomRowFilter; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -774,10 +772,6 @@ public void testPartialResultsWithColumnFilter() throws Exception { testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5"))); testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true, Bytes.toBytes("testQualifier7"), true)); - - // Throw an Exception to the old version client to remind them not to use this filter anymore - assertThrows("Stop using", DoNotRetryIOException.class, - () -> testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter())); } public void testPartialResultsWithColumnFilter(Filter filter) throws Exception { From ca340100535d6c2c43b979e26c9291d892153f1a Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sun, 12 May 2024 21:51:28 +0800 Subject: [PATCH 359/514] HBASE-28581 Remove deprecated methods in TableDescriotorBuilder (#5892) Signed-off-by: Duo Zhang --- .../hbase/client/TableDescriptorBuilder.java | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index fcdbe4e4ae64..8636b006e83d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -1380,26 +1380,6 @@ public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws return setCoprocessorToMap(value); } - /** - * Add a table coprocessor to this table. 
The coprocessor type must be - * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class - * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a - * region is opened. - * @param specStr The Coprocessor specification all in in one String - * @return the modifyable TD - * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed - * in HBase 3.0.0. - */ - @Deprecated - public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr) - throws IOException { - CoprocessorDescriptor cpDesc = - toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException( - "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr)); - checkHasCoprocessor(cpDesc.getClassName()); - return setCoprocessorToMap(specStr); - } - private void checkHasCoprocessor(final String className) throws IOException { if (hasCoprocessor(className)) { throw new IOException("Coprocessor " + className + " already exists."); From d1fc87eb1c3a36d46e18f84a6abf75f3106ef048 Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Wed, 15 May 2024 16:43:57 +0200 Subject: [PATCH 360/514] HBASE-28502 Cleanup old backup manifest logic (#5871) In older versions of HBase's backup mechanism, a manifest was written per table being backed up. This was since refactored to one manifest per backup, but the manifest code was not updated. A concrete issue with the old code was that the manifest for full backups did not correctly list the tables included in the backup. Signed-off-by: Nick Dimiduk Reviewed-by: Ray Mattingly --- .../hbase/backup/HBackupFileSystem.java | 44 +------------------ .../hbase/backup/impl/BackupAdminImpl.java | 8 ++-- .../hbase/backup/impl/BackupManifest.java | 2 +- .../backup/impl/RestoreTablesClient.java | 17 +++---- .../hbase/backup/impl/TableBackupClient.java | 42 +++--------------- .../hadoop/hbase/backup/util/BackupUtils.java | 7 ++- .../hadoop/hbase/backup/TestFullBackup.java | 11 +++++ .../hbase/backup/TestIncrementalBackup.java | 8 ++++ 8 files changed, 42 insertions(+), 97 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index c41a4a182435..2b27e7527477 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; -import java.util.HashMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.yetus.audience.InterfaceAudience; @@ -44,8 +42,8 @@ private HBackupFileSystem() { } /** - * Given the backup root dir, backup id and the table name, return the backup image location, - * which is also where the backup manifest file is. return value look like: + * Given the backup root dir, backup id and the table name, return the backup image location. 
+ * Return value look like: * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory * @param backupRootDir backup root directory @@ -79,11 +77,6 @@ public static Path getBackupTmpDirPathForBackupId(String backupRoot, String back return new Path(getBackupTmpDirPath(backupRoot), backupId); } - public static String getTableBackupDataDir(String backupRootDir, String backupId, - TableName tableName) { - return getTableBackupDir(backupRootDir, backupId, tableName) + Path.SEPARATOR + "data"; - } - public static Path getBackupPath(String backupRootDir, String backupId) { return new Path(backupRootDir + Path.SEPARATOR + backupId); } @@ -102,24 +95,6 @@ public static Path getTableBackupPath(TableName tableName, Path backupRootPath, return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName)); } - /** - * Given the backup root dir and the backup id, return the log file location for an incremental - * backup. - * @param backupRootDir backup root directory - * @param backupId backup id - * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738" - */ - public static String getLogBackupDir(String backupRootDir, String backupId) { - return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + HConstants.HREGION_LOGDIR_NAME; - } - - public static Path getLogBackupPath(String backupRootDir, String backupId) { - return new Path(getLogBackupDir(backupRootDir, backupId)); - } - - // TODO we do not keep WAL files anymore - // Move manifest file to other place private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId) throws IOException { FileSystem fs = backupRootPath.getFileSystem(conf); @@ -140,19 +115,4 @@ public static BackupManifest getManifest(Configuration conf, Path backupRootPath new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId)); return manifest; } - - /** - * Check whether the backup image path and there is manifest file in the path. 
- * @param backupManifestMap If all the manifests are found, then they are put into this map - * @param tableArray the tables involved - * @throws IOException exception - */ - public static void checkImageManifestExist(HashMap backupManifestMap, - TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId) - throws IOException { - for (TableName tableName : tableArray) { - BackupManifest manifest = getManifest(conf, backupRootPath, backupId); - backupManifestMap.put(tableName, manifest); - } - } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index f580fb0c47bb..69aef51a4ed3 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -498,16 +499,15 @@ private String[] toStringArray(TableName[] list) { @Override public void restore(RestoreRequest request) throws IOException { if (request.isCheck()) { - HashMap backupManifestMap = new HashMap<>(); // check and load backup image manifest for the tables Path rootPath = new Path(request.getBackupRootDir()); String backupId = request.getBackupId(); TableName[] sTableArray = request.getFromTables(); - HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, - conn.getConfiguration(), rootPath, backupId); + BackupManifest manifest = + HBackupFileSystem.getManifest(conn.getConfiguration(), rootPath, backupId); // Check and validate the backup image and its dependencies - if (BackupUtils.validate(backupManifestMap, conn.getConfiguration())) { + if (BackupUtils.validate(Arrays.asList(sTableArray), manifest, conn.getConfiguration())) { LOG.info(CHECK_OK); } else { LOG.error(CHECK_FAILED); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 3a1cbd55c58e..237d8686ab79 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -465,7 +465,7 @@ public List getTableList() { } /** - * TODO: fix it. Persist the manifest file. + * Persist the manifest file. * @throws BackupException if an error occurred while storing the manifest file. */ public void store(Configuration conf) throws BackupException { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java index 654fe343e27d..0c3c5b40ffb5 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.TreeSet; import org.apache.commons.lang3.StringUtils; @@ -204,19 +203,17 @@ private List getFilesRecursively(String fileBackupDir) /** * Restore operation. 
Stage 2: resolved Backup Image dependency - * @param backupManifestMap : tableName, Manifest - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to * @throws IOException exception */ - private void restore(HashMap backupManifestMap, - TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { + private void restore(BackupManifest manifest, TableName[] sTableArray, TableName[] tTableArray, + boolean isOverwrite) throws IOException { TreeSet restoreImageSet = new TreeSet<>(); for (int i = 0; i < sTableArray.length; i++) { TableName table = sTableArray[i]; - BackupManifest manifest = backupManifestMap.get(table); // Get the image list of this backup for restore in time order from old // to new. List list = new ArrayList<>(); @@ -256,12 +253,10 @@ public void execute() throws IOException { checkTargetTables(tTableArray, isOverwrite); // case RESTORE_IMAGES: - HashMap backupManifestMap = new HashMap<>(); // check and load backup image manifest for the tables Path rootPath = new Path(backupRootDir); - HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, - backupId); + BackupManifest manifest = HBackupFileSystem.getManifest(conf, rootPath, backupId); - restore(backupManifestMap, sTableArray, tTableArray, isOverwrite); + restore(manifest, sTableArray, tTableArray, isOverwrite); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index 60dbc6470a77..e758ced3f846 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -268,7 +267,7 @@ public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo bac } /** - * Add manifest for the current backup. The manifest is stored within the table backup directory. + * Creates a manifest based on the provided info, and store it in the backup-specific directory. * @param backupInfo The current backup info * @throws IOException exception */ @@ -277,43 +276,16 @@ protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, B // set the overall backup phase : store manifest backupInfo.setPhase(BackupPhase.STORE_MANIFEST); - BackupManifest manifest; - - // Since we have each table's backup in its own directory structure, - // we'll store its manifest with the table directory. - for (TableName table : backupInfo.getTables()) { - manifest = new BackupManifest(backupInfo, table); - ArrayList ancestors = backupManager.getAncestors(backupInfo, table); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - - if (type == BackupType.INCREMENTAL) { - // We'll store the log timestamps for this table only in its manifest. 
- Map> tableTimestampMap = new HashMap<>(); - tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table)); - manifest.setIncrTimestampMap(tableTimestampMap); - ArrayList ancestorss = backupManager.getAncestors(backupInfo); - for (BackupImage image : ancestorss) { - manifest.addDependentImage(image); - } - } - manifest.store(conf); - } - - // For incremental backup, we store a overall manifest in - // /WALs/ - // This is used when created the next incremental backup + BackupManifest manifest = new BackupManifest(backupInfo); if (type == BackupType.INCREMENTAL) { - manifest = new BackupManifest(backupInfo); // set the table region server start and end timestamps for incremental backup manifest.setIncrTimestampMap(backupInfo.getIncrTimestampMap()); - ArrayList ancestors = backupManager.getAncestors(backupInfo); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - manifest.store(conf); } + ArrayList ancestors = backupManager.getAncestors(backupInfo); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); } /** diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index d0a04960779d..aa87a2f3d401 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -664,15 +664,14 @@ public static RestoreRequest createRestoreRequest(String backupRootDir, String b return request; } - public static boolean validate(HashMap backupManifestMap, + public static boolean validate(List tables, BackupManifest backupManifest, Configuration conf) throws IOException { boolean isValid = true; - for (Entry manifestEntry : backupManifestMap.entrySet()) { - TableName table = manifestEntry.getKey(); + for (TableName table : tables) { TreeSet imageSet = new TreeSet<>(); - ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); + ArrayList depList = backupManifest.getDependentListByTable(table); if (depList != null && !depList.isEmpty()) { imageSet.addAll(depList); } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index 7cec06799742..ba09817fcdec 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.util.HashSet; import java.util.List; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; @@ -30,6 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; + @Category(LargeTests.class) public class TestFullBackup extends TestBackupBase { @@ -56,6 +62,11 @@ public void testFullBackupMultipleCommand() throws Exception { String backupId = data.getBackupId(); assertTrue(checkSucceeded(backupId)); } + + BackupInfo newestBackup = backups.get(0); + 
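(For illustration; not part of the diff.) After this cleanup there is exactly one manifest per backup, stored in the backup-specific directory, and it lists every table in the backup. A rough sketch of the consumer-side flow, assuming a Configuration conf, the backup root Path rootPath and a String backupId:

  // Resolve the single per-backup manifest; no per-table manifest map any more.
  BackupManifest manifest = HBackupFileSystem.getManifest(conf, rootPath, backupId);

  // For full backups the table list is now populated correctly.
  List<TableName> tables = manifest.getTableList();

  // Validation now takes the table list plus the one manifest,
  // replacing the old HashMap<TableName, BackupManifest> parameter.
  boolean ok = BackupUtils.validate(tables, manifest, conf);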
BackupManifest manifest = + HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), newestBackup.getBackupId()); + assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList())); } LOG.info("backup complete"); } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index 90fbba2bf0ae..86966ddfd6e0 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; import java.util.List; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -50,6 +54,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @Category(LargeTests.class) @RunWith(Parameterized.class) @@ -148,6 +153,9 @@ public void TestIncBackupRestore() throws Exception { request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); String backupIdIncMultiple = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple)); + BackupManifest manifest = + HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple); + assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList())); // add column family f2 to table1 // drop column family f3 From ad88ed3aacadc517808d0d6d27376414e0bbb87d Mon Sep 17 00:00:00 2001 From: gvprathyusha6 <70918688+gvprathyusha6@users.noreply.github.com> Date: Thu, 16 May 2024 01:26:31 +0530 Subject: [PATCH 361/514] HBASE-27938 PE load any custom implementation of tests at runtime (#5307) Signed-off-by: Duo Zhang Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../hadoop/hbase/PerformanceEvaluation.java | 52 +++++++++++++++++-- .../hbase/TestPerformanceEvaluation.java | 47 +++++++++++++++++ 2 files changed, 94 insertions(+), 5 deletions(-) diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index e0040c1f178a..97fcefe4a70c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -36,6 +36,7 @@ import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Properties; import java.util.Queue; import java.util.Random; import java.util.TreeMap; @@ -362,7 +363,8 @@ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { // {RegionSplitPolicy,replica count} does not match requested, or when the // number of column 
families does not match requested. if ( - (exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) + (exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions + && opts.presplitRegions != admin.getRegions(tableName).size()) || (!isReadCmd && desc != null && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) @@ -730,6 +732,7 @@ static class TestOptions { boolean cacheBlocks = true; Scan.ReadType scanReadType = Scan.ReadType.DEFAULT; long bufferSize = 2l * 1024l * 1024l; + Properties commandProperties; public TestOptions() { } @@ -786,6 +789,11 @@ public TestOptions(TestOptions that) { this.cacheBlocks = that.cacheBlocks; this.scanReadType = that.scanReadType; this.bufferSize = that.bufferSize; + this.commandProperties = that.commandProperties; + } + + public Properties getCommandProperties() { + return commandProperties; } public int getCaching() { @@ -1151,10 +1159,10 @@ private static long nextRandomSeed() { protected final Configuration conf; protected final TestOptions opts; - private final Status status; + protected final Status status; private String testName; - private Histogram latencyHistogram; + protected Histogram latencyHistogram; private Histogram replicaLatencyHistogram; private Histogram valueSizeHistogram; private Histogram rpcCallsHistogram; @@ -2626,7 +2634,7 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(message); } System.err.print("Usage: hbase " + shortName); - System.err.println(" [-D]* "); + System.err.println(" [-D]* "); System.err.println(); System.err.println("General Options:"); System.err.println( @@ -2727,6 +2735,13 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(String.format(" %-20s %s", command.getName(), command.getDescription())); } System.err.println(); + System.err.println("Class:"); + System.err.println("To run any custom implementation of PerformanceEvaluation.Test, " + + "provide the classname of the implementaion class in place of " + + "command name and it will be loaded at runtime from classpath.:"); + System.err.println("Please consider to contribute back " + + "this custom test impl into a builtin PE command for the benefit of the community"); + System.err.println(); System.err.println("Args:"); System.err.println(" nclients Integer. Required. Total number of clients " + "(and HRegionServers) running. 
1 <= value <= 500"); @@ -3021,6 +3036,20 @@ static TestOptions parseOpts(Queue args) { continue; } + final String commandPropertiesFile = "--commandPropertiesFile="; + if (cmd.startsWith(commandPropertiesFile)) { + String fileName = String.valueOf(cmd.substring(commandPropertiesFile.length())); + Properties properties = new Properties(); + try { + properties + .load(PerformanceEvaluation.class.getClassLoader().getResourceAsStream(fileName)); + opts.commandProperties = properties; + } catch (IOException e) { + LOG.error("Failed to load metricIds from properties file", e); + } + continue; + } + validateParsedOpts(opts); if (isCommandClass(cmd)) { @@ -3134,7 +3163,20 @@ public int run(String[] args) throws Exception { } private static boolean isCommandClass(String cmd) { - return COMMANDS.containsKey(cmd); + return COMMANDS.containsKey(cmd) || isCustomTestClass(cmd); + } + + private static boolean isCustomTestClass(String cmd) { + Class cmdClass; + try { + cmdClass = + (Class) PerformanceEvaluation.class.getClassLoader().loadClass(cmd); + addCommandDescriptor(cmdClass, cmd, "custom command"); + return true; + } catch (Throwable th) { + LOG.info("No class found for command: " + cmd, th); + return false; + } } private static Class determineCommandClass(String cmd) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java index f02d30c3887b..cf11510a897a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java @@ -28,6 +28,8 @@ import com.codahale.metrics.UniformReservoir; import java.io.BufferedReader; import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.io.InputStreamReader; import java.lang.reflect.Constructor; @@ -35,6 +37,7 @@ import java.nio.charset.StandardCharsets; import java.util.LinkedList; import java.util.NoSuchElementException; +import java.util.Properties; import java.util.Queue; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; @@ -42,8 +45,10 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.PerformanceEvaluation.RandomReadTest; +import org.apache.hadoop.hbase.PerformanceEvaluation.Status; import org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -359,4 +364,46 @@ public void testParseOptsValueRandom() { assertEquals(true, options.valueRandom); } + @Test + public void testCustomTestClassOptions() throws IOException { + Queue opts = new LinkedList<>(); + // create custom properties that can be used for a custom test class + Properties commandProps = new Properties(); + commandProps.put("prop1", "val1"); + String cmdPropsFilePath = + this.getClass().getClassLoader().getResource("").getPath() + "cmd_properties.txt"; + FileWriter writer = new FileWriter(new File(cmdPropsFilePath)); + commandProps.store(writer, null); + // create opts for the custom test class - commandPropertiesFile, testClassName + opts.offer("--commandPropertiesFile=" + "cmd_properties.txt"); + String 
testClassName = "org.apache.hadoop.hbase.TestPerformanceEvaluation$PESampleTestImpl"; + opts.offer(testClassName); + opts.offer("1"); + PerformanceEvaluation.TestOptions options = PerformanceEvaluation.parseOpts(opts); + assertNotNull(options); + assertNotNull(options.getCmdName()); + assertEquals(testClassName, options.getCmdName()); + assertNotNull(options.getCommandProperties()); + assertEquals("val1", options.getCommandProperties().get("prop1")); + } + + class PESampleTestImpl extends PerformanceEvaluation.Test { + + PESampleTestImpl(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + @Override + void onStartup() throws IOException { + } + + @Override + void onTakedown() throws IOException { + } + + @Override + boolean testRow(int i, long startTime) throws IOException, InterruptedException { + return false; + } + } } From 716adf50e904dc8aef4a595fcf5ecdcbb120761e Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 16 May 2024 08:52:34 +0200 Subject: [PATCH 362/514] HBASE-28501 Support non-SPNEGO authentication methods and implement session handling in REST java client library (#5881) --- .../hadoop/hbase/rest/client/Client.java | 208 +++++++++++++++--- 1 file changed, 178 insertions(+), 30 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index abf00f938c9b..a7df571fb2f7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -18,14 +18,16 @@ package org.apache.hadoop.hbase.rest.client; import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Files; +import java.nio.file.Path; +import java.security.GeneralSecurityException; import java.security.KeyManagementException; import java.security.KeyStore; import java.security.KeyStoreException; @@ -44,9 +46,14 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; +import org.apache.hadoop.security.ssl.SSLFactory; +import org.apache.hadoop.security.ssl.SSLFactory.Mode; import org.apache.http.Header; +import org.apache.http.HttpHeaders; import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.HttpClient; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpDelete; @@ -55,9 +62,12 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.InputStreamEntity; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.cookie.BasicClientCookie; import org.apache.http.message.BasicHeader; import org.apache.http.ssl.SSLContexts; import 
org.apache.http.util.EntityUtils; @@ -86,8 +96,11 @@ public class Client { private boolean sslEnabled; private HttpResponse resp; private HttpGet httpGet = null; - + private HttpClientContext stickyContext = null; + private BasicCredentialsProvider provider; + private Optional trustStore; private Map extraHeaders; + private KerberosAuthenticator authenticator; private static final String AUTH_COOKIE = "hadoop.auth"; private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "="; @@ -100,11 +113,13 @@ public Client() { this(null); } - private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, - Optional trustStore) { + private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, boolean sticky, + Optional trustStore, Optional userName, Optional password, + Optional bearerToken) { this.cluster = cluster; this.conf = conf; this.sslEnabled = sslEnabled; + this.trustStore = trustStore; extraHeaders = new ConcurrentHashMap<>(); String clspath = System.getProperty("java.class.path"); LOG.debug("classpath " + clspath); @@ -136,11 +151,41 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, } } + if (userName.isPresent() && password.isPresent()) { + // We want to stick to the old very limited authentication and session handling when sticky is + // not set + // to preserve backwards compatibility + if (!sticky) { + throw new IllegalArgumentException("BASIC auth is only implemented when sticky is set"); + } + provider = new BasicCredentialsProvider(); + // AuthScope.ANY is required for pre-emptive auth. We only ever use a single auth method + // anyway. + AuthScope anyAuthScope = AuthScope.ANY; + this.provider.setCredentials(anyAuthScope, + new UsernamePasswordCredentials(userName.get(), password.get())); + } + + if (bearerToken.isPresent()) { + // We want to stick to the old very limited authentication and session handling when sticky is + // not set + // to preserve backwards compatibility + if (!sticky) { + throw new IllegalArgumentException("BEARER auth is only implemented when sticky is set"); + } + // We could also put the header into the context or connection, but that would have the same + // effect. + extraHeaders.put(HttpHeaders.AUTHORIZATION, "Bearer " + bearerToken.get()); + } + this.httpClient = httpClientBuilder.build(); + setSticky(sticky); } /** - * Constructor + * Constructor This constructor will create an object using the old faulty load balancing logic. + * When specifying multiple servers in the cluster object, it is highly recommended to call + * setSticky() on the created client, or use one of the preferred constructors instead. * @param cluster the cluster definition */ public Client(Cluster cluster) { @@ -148,26 +193,35 @@ public Client(Cluster cluster) { } /** - * Constructor + * Constructor This constructor will create an object using the old faulty load balancing logic. + * When specifying multiple servers in the cluster object, it is highly recommended to call + * setSticky() on the created client, or use one of the preferred constructors instead. 
* @param cluster the cluster definition * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { - initialize(cluster, HBaseConfiguration.create(), sslEnabled, Optional.empty()); + initialize(cluster, HBaseConfiguration.create(), sslEnabled, false, Optional.empty(), + Optional.empty(), Optional.empty(), Optional.empty()); } /** - * Constructor + * Constructor This constructor will create an object using the old faulty load balancing logic. + * When specifying multiple servers in the cluster object, it is highly recommended to call + * setSticky() on the created client, or use one of the preferred constructors instead. * @param cluster the cluster definition * @param conf Configuration * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { - initialize(cluster, conf, sslEnabled, Optional.empty()); + initialize(cluster, conf, sslEnabled, false, Optional.empty(), Optional.empty(), + Optional.empty(), Optional.empty()); } /** - * Constructor, allowing to define custom trust store (only for SSL connections) + * Constructor, allowing to define custom trust store (only for SSL connections) This constructor + * will create an object using the old faulty load balancing logic. When specifying multiple + * servers in the cluster object, it is highly recommended to call setSticky() on the created + * client, or use one of the preferred constructors instead. * @param cluster the cluster definition * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store @@ -176,22 +230,56 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, Optional trustStoreType) { - this(cluster, HBaseConfiguration.create(), trustStorePath, trustStorePassword, trustStoreType); + this(cluster, HBaseConfiguration.create(), true, trustStorePath, trustStorePassword, + trustStoreType); } /** - * Constructor, allowing to define custom trust store (only for SSL connections) + * Constructor that accepts an optional trustStore and authentication information for either BASIC + * or BEARER authentication in sticky mode, which does not use the old faulty load balancing + * logic, and enables correct session handling. If neither userName/password, nor the bearer token + * is specified, the client falls back to SPNEGO auth. The loadTrustsore static method can be used + * to load a local trustStore file. This is the preferred constructor to use. + * @param cluster the cluster definition + * @param conf HBase/Hadoop configuration + * @param sslEnabled use HTTPS + * @param trustStore the optional trustStore object + * @param userName for BASIC auth + * @param password for BASIC auth + * @param bearerToken for BEAERER auth + */ + public Client(Cluster cluster, Configuration conf, boolean sslEnabled, + Optional trustStore, Optional userName, Optional password, + Optional bearerToken) { + initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken); + } + + /** + * Constructor, allowing to define custom trust store (only for SSL connections) This constructor + * also enables sticky mode. This is a preferred constructor when not using BASIC or JWT + * authentication. Clients created by this will use the old faulty load balancing logic. 
* @param cluster the cluster definition - * @param conf Configuration + * @param conf HBase/Hadoop Configuration * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ - public Client(Cluster cluster, Configuration conf, String trustStorePath, + public Client(Cluster cluster, Configuration conf, boolean sslEnabled, String trustStorePath, Optional trustStorePassword, Optional trustStoreType) { + KeyStore trustStore = loadTruststore(trustStorePath, trustStorePassword, trustStoreType); + initialize(cluster, conf, sslEnabled, false, Optional.of(trustStore), Optional.empty(), + Optional.empty(), Optional.empty()); + } + + /** + * Loads a trustStore from the local fileSystem. Can be used to load the trustStore for the + * preferred constructor. + */ + public static KeyStore loadTruststore(String trustStorePath, Optional trustStorePassword, + Optional trustStoreType) { - char[] password = trustStorePassword.map(String::toCharArray).orElse(null); + char[] truststorePassword = trustStorePassword.map(String::toCharArray).orElse(null); String type = trustStoreType.orElse(KeyStore.getDefaultType()); KeyStore trustStore; @@ -202,13 +290,12 @@ public Client(Cluster cluster, Configuration conf, String trustStorePath, } try (InputStream inputStream = new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) { - trustStore.load(inputStream, password); + trustStore.load(inputStream, truststorePassword); } catch (CertificateException | NoSuchAlgorithmException | IOException e) { throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath, e); } - - initialize(cluster, conf, true, Optional.of(trustStore)); + return trustStore; } /** @@ -337,12 +424,24 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u } long startTime = EnvironmentEdgeManager.currentTime(); if (resp != null) EntityUtils.consumeQuietly(resp.getEntity()); - resp = httpClient.execute(method); + if (stickyContext != null) { + resp = httpClient.execute(method, stickyContext); + } else { + resp = httpClient.execute(method); + } if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { // Authentication error LOG.debug("Performing negotiation with the server."); - negotiate(method, uri); - resp = httpClient.execute(method); + try { + negotiate(method, uri); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + if (stickyContext != null) { + resp = httpClient.execute(method, stickyContext); + } else { + resp = httpClient.execute(method); + } } long endTime = EnvironmentEdgeManager.currentTime(); @@ -377,19 +476,58 @@ public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] hea * @param uri the String to parse as a URL. * @throws IOException if unknown protocol is found. 
*/ - private void negotiate(HttpUriRequest method, String uri) throws IOException { + private void negotiate(HttpUriRequest method, String uri) + throws IOException, GeneralSecurityException { try { AuthenticatedURL.Token token = new AuthenticatedURL.Token(); - KerberosAuthenticator authenticator = new KerberosAuthenticator(); - authenticator.authenticate(new URL(uri), token); - // Inject the obtained negotiated token in the method cookie - injectToken(method, token); + if (authenticator == null) { + authenticator = new KerberosAuthenticator(); + if (trustStore.isPresent()) { + // The authenticator does not use Apache HttpClient, so we need to + // configure it separately to use the specified trustStore + Configuration sslConf = setupTrustStoreForHadoop(trustStore.get()); + SSLFactory sslFactory = new SSLFactory(Mode.CLIENT, sslConf); + sslFactory.init(); + authenticator.setConnectionConfigurator(sslFactory); + } + } + URL url = new URL(uri); + authenticator.authenticate(url, token); + if (sticky) { + BasicClientCookie authCookie = new BasicClientCookie("hadoop.auth", token.toString()); + // Hadoop eats the domain even if set by server + authCookie.setDomain(url.getHost()); + stickyContext.getCookieStore().addCookie(authCookie); + } else { + // session cookie is NOT set for backwards compatibility for non-sticky mode + // Inject the obtained negotiated token in the method cookie + // This is only done for this single request, the next one will trigger a new SPENGO + // handshake + injectToken(method, token); + } } catch (AuthenticationException e) { LOG.error("Failed to negotiate with the server.", e); throw new IOException(e); } } + private Configuration setupTrustStoreForHadoop(KeyStore trustStore) + throws IOException, KeyStoreException, NoSuchAlgorithmException, CertificateException { + Path tmpDirPath = Files.createTempDirectory("hbase_rest_client_truststore"); + File trustStoreFile = tmpDirPath.resolve("truststore.jks").toFile(); + // Shouldn't be needed with the secure temp dir, but let's generate a password anyway + String password = Double.toString(Math.random()); + try (FileOutputStream fos = new FileOutputStream(trustStoreFile)) { + trustStore.store(fos, password.toCharArray()); + } + + Configuration sslConf = new Configuration(); + // Type is the Java default, we use the same JVM to read this back + sslConf.set("ssl.client.keystore.location", trustStoreFile.getAbsolutePath()); + sslConf.set("ssl.client.keystore.password", password); + return sslConf; + } + /** * Helper method that injects an authentication token to send with the method. * @param method method to inject the authentication token into. @@ -431,11 +569,21 @@ public boolean isSticky() { * The default behaviour is load balancing by sending each request to a random host. This DOES NOT * work with scans, which have state on the REST servers. Set sticky to true before attempting * Scan related operations if more than one host is defined in the cluster. Nodes must not be - * added or removed from the Cluster object while sticky is true. + * added or removed from the Cluster object while sticky is true. Setting the sticky flag also + * enables session handling, which eliminates the need to re-authenticate each request, and lets + * the client handle any other cookies (like the sticky cookie set by load balancers) correctly. 
* @param sticky whether subsequent requests will use the same host */ public void setSticky(boolean sticky) { lastNodeId = null; + if (sticky) { + stickyContext = new HttpClientContext(); + if (provider != null) { + stickyContext.setCredentialsProvider(provider); + } + } else { + stickyContext = null; + } this.sticky = sticky; } @@ -654,7 +802,7 @@ public Response put(Cluster cluster, String path, Header[] headers, byte[] conte throws IOException { HttpPut method = new HttpPut(path); try { - method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); + method.setEntity(new ByteArrayEntity(content)); HttpResponse resp = execute(cluster, method, headers, path); headers = resp.getAllHeaders(); content = getResponseBody(resp); @@ -748,7 +896,7 @@ public Response post(Cluster cluster, String path, Header[] headers, byte[] cont throws IOException { HttpPost method = new HttpPost(path); try { - method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); + method.setEntity(new ByteArrayEntity(content)); HttpResponse resp = execute(cluster, method, headers, path); headers = resp.getAllHeaders(); content = getResponseBody(resp); From 00f078a05ef0d7bfc64942aa830ec2a892c9d9ac Mon Sep 17 00:00:00 2001 From: Kadir Ozdemir <37155482+kadirozde@users.noreply.github.com> Date: Thu, 16 May 2024 20:13:35 -0700 Subject: [PATCH 363/514] HBASE-25972 Dual File Compaction (#5545) Signed-off-by: Andrew Purtell Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hadoop/hbase/PerformanceEvaluation.java | 49 +- .../hbase/io/hfile/HFilePrettyPrinter.java | 1 + .../regionserver/BrokenStoreFileCleaner.java | 2 +- .../hadoop/hbase/regionserver/CellSink.java | 11 + .../regionserver/DateTieredStoreEngine.java | 6 +- .../regionserver/DefaultStoreEngine.java | 6 +- .../regionserver/DefaultStoreFileManager.java | 119 ++- .../hadoop/hbase/regionserver/HStore.java | 43 +- .../hadoop/hbase/regionserver/HStoreFile.java | 20 + .../hbase/regionserver/StoreContext.java | 8 + .../hbase/regionserver/StoreEngine.java | 2 +- .../hbase/regionserver/StoreFileManager.java | 16 +- .../hbase/regionserver/StoreFileWriter.java | 864 +++++++++++++----- .../hbase/regionserver/StoreScanner.java | 15 +- .../regionserver/StripeStoreFileManager.java | 4 +- .../compactions/CompactionConfiguration.java | 8 + .../regionserver/compactions/Compactor.java | 2 +- .../compactions/DefaultCompactor.java | 12 +- .../compactions/StripeCompactionPolicy.java | 8 +- .../StoreFileTrackerBase.java | 4 +- .../regionserver/CreateRandomStoreFile.java | 2 +- .../regionserver/TestCompactorMemLeak.java | 2 +- .../hadoop/hbase/regionserver/TestHStore.java | 8 +- .../regionserver/TestStoreFileWriter.java | 355 +++++++ .../TestStripeStoreFileManager.java | 12 +- .../TestStripeCompactionPolicy.java | 12 +- 26 files changed, 1272 insertions(+), 319 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 97fcefe4a70c..9f97002f4b44 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -181,8 +181,11 @@ public class PerformanceEvaluation extends Configured implements Tool { addCommandDescriptor(RandomScanWithRange10000Test.class, 
"scanRange10000", "Run random seek scan with both start and stop row (max 10000 rows)"); addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(RandomDeleteTest.class, "randomDelete", "Run random delete test"); addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); + addCommandDescriptor(SequentialDeleteTest.class, "sequentialDelete", + "Run sequential delete test"); addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); @@ -352,7 +355,8 @@ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { boolean needsDelete = false, exists = admin.tableExists(tableName); boolean isReadCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("read") || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); - if (!exists && isReadCmd) { + boolean isDeleteCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("delete"); + if (!exists && (isReadCmd || isDeleteCmd)) { throw new IllegalStateException( "Must specify an existing table for read commands. Run a write command first."); } @@ -367,7 +371,8 @@ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { && opts.presplitRegions != admin.getRegions(tableName).size()) || (!isReadCmd && desc != null && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) - || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) + || (!(isReadCmd || isDeleteCmd) && desc != null + && desc.getRegionReplication() != opts.replicas) || (desc != null && desc.getColumnFamilyCount() != opts.families) ) { needsDelete = true; @@ -2071,6 +2076,18 @@ protected byte[] generateRow(final int i) { } + static class RandomDeleteTest extends SequentialDeleteTest { + RandomDeleteTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + @Override + protected byte[] generateRow(final int i) { + return getRandomRow(this.rand, opts.totalRows); + } + + } + static class ScanTest extends TableTest { private ResultScanner testScanner; @@ -2406,6 +2423,34 @@ boolean testRow(final int i, final long startTime) throws IOException { } } + static class SequentialDeleteTest extends BufferedMutatorTest { + + SequentialDeleteTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + protected byte[] generateRow(final int i) { + return format(i); + } + + @Override + boolean testRow(final int i, final long startTime) throws IOException { + byte[] row = generateRow(i); + Delete delete = new Delete(row); + for (int family = 0; family < opts.families; family++) { + byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); + delete.addFamily(familyName); + } + delete.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL); + if (opts.autoFlush) { + table.delete(delete); + } else { + mutator.mutate(delete); + } + return true; + } + } + /* * Insert fake regions into meta table with contiguous split keys. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 24db92b4de1c..0c32303746c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -537,6 +537,7 @@ private void printMeta(HFile.Reader reader, Map fileInfo) throws Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) || Bytes.equals(e.getKey(), HFileInfo.TAGS_COMPRESSED) || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY) + || Bytes.equals(e.getKey(), HStoreFile.HISTORICAL_KEY) ) { out.println(Bytes.toBoolean(e.getValue())); } else if (Bytes.equals(e.getKey(), HFileInfo.LASTKEY)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java index ba223de966c0..c235bdc29dc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java @@ -162,7 +162,7 @@ private boolean isCompactedFile(FileStatus file, HStore store) { } private boolean isActiveStorefile(FileStatus file, HStore store) { - return store.getStoreEngine().getStoreFileManager().getStorefiles().stream() + return store.getStoreEngine().getStoreFileManager().getStoreFiles().stream() .anyMatch(sf -> sf.getPath().equals(file.getPath())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java index c7587a147a6f..1d838d86abcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.yetus.audience.InterfaceAudience; @@ -34,4 +35,14 @@ public interface CellSink { * @param cell the cell to be added */ void append(Cell cell) throws IOException; + + /** + * Append the given (possibly partial) list of cells of a row + * @param cellList the cell list to be added + */ + default void appendAll(List cellList) throws IOException { + for (Cell cell : cellList) { + append(cell); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java index d15a6c92ef0b..ded6564bce53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java @@ -43,7 +43,7 @@ public class DateTieredStoreEngine extends StoreEngine { @Override public boolean needsCompaction(List filesCompacting) { - return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(), filesCompacting); + return compactionPolicy.needsCompaction(storeFileManager.getStoreFiles(), filesCompacting); } @Override @@ -65,14 +65,14 @@ private final class DateTieredCompactionContext extends CompactionContext { @Override public List preSelect(List 
filesCompacting) { - return compactionPolicy.preSelectCompactionForCoprocessor(storeFileManager.getStorefiles(), + return compactionPolicy.preSelectCompactionForCoprocessor(storeFileManager.getStoreFiles(), filesCompacting); } @Override public boolean select(List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException { - request = compactionPolicy.selectCompaction(storeFileManager.getStorefiles(), filesCompacting, + request = compactionPolicy.selectCompaction(storeFileManager.getStoreFiles(), filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); return request != null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java index 0c9fb9adcc2c..7b095596a3da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java @@ -56,7 +56,7 @@ public class DefaultStoreEngine extends StoreEngine filesCompacting) { - return compactionPolicy.needsCompaction(this.storeFileManager.getStorefiles(), filesCompacting); + return compactionPolicy.needsCompaction(this.storeFileManager.getStoreFiles(), filesCompacting); } @Override @@ -111,7 +111,7 @@ private class DefaultCompactionContext extends CompactionContext { @Override public boolean select(List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException { - request = compactionPolicy.selectCompaction(storeFileManager.getStorefiles(), filesCompacting, + request = compactionPolicy.selectCompaction(storeFileManager.getStoreFiles(), filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); return request != null; } @@ -124,7 +124,7 @@ public List compact(ThroughputController throughputController, User user) @Override public List preSelect(List filesCompacting) { - return compactionPolicy.preSelectCompactionForCoprocessor(storeFileManager.getStorefiles(), + return compactionPolicy.preSelectCompactionForCoprocessor(storeFileManager.getStoreFiles(), filesCompacting); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java index f2d7cd973688..920a490daa2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.regionserver.StoreFileWriter.shouldEnableHistoricalCompactionFiles; + +import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; @@ -48,17 +52,35 @@ class DefaultStoreFileManager implements StoreFileManager { private final CompactionConfiguration comConf; private final int blockingFileCount; private final Comparator storeFileComparator; - /** - * List of store files inside this store. This is an immutable list that is atomically replaced - * when its contents change. - */ - private volatile ImmutableList storefiles = ImmutableList.of(); + + static class StoreFileList { + /** + * List of store files inside this store. 
This is an immutable list that is atomically replaced + * when its contents change. + */ + final ImmutableList all; + /** + * List of store files that include the latest cells inside this store. This is an immutable + * list that is atomically replaced when its contents change. + */ + @Nullable + final ImmutableList live; + + StoreFileList(ImmutableList storeFiles, ImmutableList liveStoreFiles) { + this.all = storeFiles; + this.live = liveStoreFiles; + } + } + + private volatile StoreFileList storeFiles; + /** * List of compacted files inside this store that needs to be excluded in reads because further * new reads will be using only the newly created files out of compaction. These compacted files * will be deleted/cleared once all the existing readers on these compacted files are done. */ private volatile ImmutableList compactedfiles = ImmutableList.of(); + private final boolean enableLiveFileTracking; public DefaultStoreFileManager(CellComparator cellComparator, Comparator storeFileComparator, Configuration conf, @@ -66,18 +88,35 @@ public DefaultStoreFileManager(CellComparator cellComparator, this.cellComparator = cellComparator; this.storeFileComparator = storeFileComparator; this.comConf = comConf; - this.blockingFileCount = + blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); + enableLiveFileTracking = shouldEnableHistoricalCompactionFiles(conf); + storeFiles = + new StoreFileList(ImmutableList.of(), enableLiveFileTracking ? ImmutableList.of() : null); + } + + private List getLiveFiles(Collection storeFiles) throws IOException { + List liveFiles = new ArrayList<>(storeFiles.size()); + for (HStoreFile file : storeFiles) { + file.initReader(); + if (!file.isHistorical()) { + liveFiles.add(file); + } + } + return liveFiles; } @Override - public void loadFiles(List storeFiles) { - this.storefiles = ImmutableList.sortedCopyOf(storeFileComparator, storeFiles); + public void loadFiles(List storeFiles) throws IOException { + this.storeFiles = new StoreFileList(ImmutableList.sortedCopyOf(storeFileComparator, storeFiles), + enableLiveFileTracking + ? ImmutableList.sortedCopyOf(storeFileComparator, getLiveFiles(storeFiles)) + : null); } @Override - public final Collection getStorefiles() { - return storefiles; + public final Collection getStoreFiles() { + return storeFiles.all; } @Override @@ -86,15 +125,20 @@ public Collection getCompactedfiles() { } @Override - public void insertNewFiles(Collection sfs) { - this.storefiles = - ImmutableList.sortedCopyOf(storeFileComparator, Iterables.concat(this.storefiles, sfs)); + public void insertNewFiles(Collection sfs) throws IOException { + storeFiles = new StoreFileList( + ImmutableList.sortedCopyOf(storeFileComparator, Iterables.concat(storeFiles.all, sfs)), + enableLiveFileTracking + ? ImmutableList.sortedCopyOf(storeFileComparator, + Iterables.concat(storeFiles.live, getLiveFiles(sfs))) + : null); } @Override public ImmutableCollection clearFiles() { - ImmutableList result = storefiles; - storefiles = ImmutableList.of(); + ImmutableList result = storeFiles.all; + storeFiles = + new StoreFileList(ImmutableList.of(), enableLiveFileTracking ? 
ImmutableList.of() : null); return result; } @@ -107,7 +151,7 @@ public Collection clearCompactedFiles() { @Override public final int getStorefileCount() { - return storefiles.size(); + return storeFiles.all.size(); } @Override @@ -117,28 +161,38 @@ public final int getCompactedFilesCount() { @Override public void addCompactionResults(Collection newCompactedfiles, - Collection results) { - this.storefiles = ImmutableList.sortedCopyOf(storeFileComparator, Iterables - .concat(Iterables.filter(storefiles, sf -> !newCompactedfiles.contains(sf)), results)); + Collection results) throws IOException { + ImmutableList liveStoreFiles = null; + if (enableLiveFileTracking) { + liveStoreFiles = ImmutableList.sortedCopyOf(storeFileComparator, + Iterables.concat(Iterables.filter(storeFiles.live, sf -> !newCompactedfiles.contains(sf)), + getLiveFiles(results))); + } + storeFiles = + new StoreFileList( + ImmutableList + .sortedCopyOf(storeFileComparator, + Iterables.concat( + Iterables.filter(storeFiles.all, sf -> !newCompactedfiles.contains(sf)), results)), + liveStoreFiles); // Mark the files as compactedAway once the storefiles and compactedfiles list is finalized // Let a background thread close the actual reader on these compacted files and also // ensure to evict the blocks from block cache so that they are no longer in // cache newCompactedfiles.forEach(HStoreFile::markCompactedAway); - this.compactedfiles = ImmutableList.sortedCopyOf(storeFileComparator, - Iterables.concat(this.compactedfiles, newCompactedfiles)); + compactedfiles = ImmutableList.sortedCopyOf(storeFileComparator, + Iterables.concat(compactedfiles, newCompactedfiles)); } @Override public void removeCompactedFiles(Collection removedCompactedfiles) { - this.compactedfiles = - this.compactedfiles.stream().filter(sf -> !removedCompactedfiles.contains(sf)) - .sorted(storeFileComparator).collect(ImmutableList.toImmutableList()); + compactedfiles = compactedfiles.stream().filter(sf -> !removedCompactedfiles.contains(sf)) + .sorted(storeFileComparator).collect(ImmutableList.toImmutableList()); } @Override public final Iterator getCandidateFilesForRowKeyBefore(KeyValue targetKey) { - return this.storefiles.reverse().iterator(); + return storeFiles.all.reverse().iterator(); } @Override @@ -153,25 +207,28 @@ public Iterator updateCandidateFilesForRowKeyBefore( @Override public final Optional getSplitPoint() throws IOException { - return StoreUtils.getSplitPoint(storefiles, cellComparator); + return StoreUtils.getSplitPoint(storeFiles.all, cellComparator); } @Override - public final Collection getFilesForScan(byte[] startRow, boolean includeStartRow, - byte[] stopRow, boolean includeStopRow) { + public Collection getFilesForScan(byte[] startRow, boolean includeStartRow, + byte[] stopRow, boolean includeStopRow, boolean onlyLatestVersion) { + if (onlyLatestVersion && enableLiveFileTracking) { + return storeFiles.live; + } // We cannot provide any useful input and already have the files sorted by seqNum. - return getStorefiles(); + return getStoreFiles(); } @Override public int getStoreCompactionPriority() { - int priority = blockingFileCount - storefiles.size(); + int priority = blockingFileCount - storeFiles.all.size(); return (priority == HStore.PRIORITY_USER) ? priority + 1 : priority; } @Override public Collection getUnneededFiles(long maxTs, List filesCompacting) { - ImmutableList files = storefiles; + ImmutableList files = storeFiles.all; // 1) We can never get rid of the last file which has the maximum seqid. 
// 2) Files that are not the latest can't become one due to (1), so the rest are fair game. return files.stream().limit(Math.max(0, files.size() - 1)).filter(sf -> { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 9954c78142e9..3c879dbdb730 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -593,7 +593,7 @@ public long timeOfOldestEdit() { /** Returns All store files. */ @Override public Collection getStorefiles() { - return this.storeEngine.getStoreFileManager().getStorefiles(); + return this.storeEngine.getStoreFileManager().getStoreFiles(); } @Override @@ -956,10 +956,10 @@ private void notifyChangedReadersObservers(List sfs) throws IOExcept * @return all scanners for this store */ public List getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, - boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt) - throws IOException { + boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt, + boolean onlyLatestVersion) throws IOException { return getScanners(cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, false, - readPt); + readPt, onlyLatestVersion); } /** @@ -977,13 +977,14 @@ public List getScanners(boolean cacheBlocks, boolean isGet, boo */ public List getScanners(boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow, - byte[] stopRow, boolean includeStopRow, long readPt) throws IOException { + byte[] stopRow, boolean includeStopRow, long readPt, boolean onlyLatestVersion) + throws IOException { Collection storeFilesToScan; List memStoreScanners; this.storeEngine.readLock(); try { storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow, - includeStartRow, stopRow, includeStopRow); + includeStartRow, stopRow, includeStopRow, onlyLatestVersion); memStoreScanners = this.memstore.getScanners(readPt); // NOTE: here we must increase the refCount for storeFiles because we would open the // storeFiles and get the StoreFileScanners for them.If we don't increase the refCount here, @@ -1042,10 +1043,10 @@ private static void clearAndClose(List scanners) { */ public List getScanners(List files, boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, - byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner) - throws IOException { + byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner, + boolean onlyLatestVersion) throws IOException { return getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, - false, readPt, includeMemstoreScanner); + false, readPt, includeMemstoreScanner, onlyLatestVersion); } /** @@ -1067,7 +1068,7 @@ public List getScanners(List files, boolean cacheBl public List getScanners(List files, boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt, - boolean includeMemstoreScanner) throws IOException { + boolean includeMemstoreScanner, boolean onlyLatestVersion) throws IOException { List memStoreScanners = null; if (includeMemstoreScanner) { this.storeEngine.readLock(); @@ -1428,7 
+1429,7 @@ public CompactionProgress getCompactionProgress() { @Override public boolean shouldPerformMajorCompaction() throws IOException { - for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) { + for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStoreFiles()) { // TODO: what are these reader checks all over the place? if (sf.getReader() == null) { LOG.debug("StoreFile {} has null Reader", sf); @@ -1436,7 +1437,7 @@ public boolean shouldPerformMajorCompaction() throws IOException { } } return storeEngine.getCompactionPolicy() - .shouldPerformMajorCompaction(this.storeEngine.getStoreFileManager().getStorefiles()); + .shouldPerformMajorCompaction(this.storeEngine.getStoreFileManager().getStoreFiles()); } public Optional requestCompaction() throws IOException { @@ -1614,7 +1615,7 @@ private void finishCompactionRequest(CompactionRequestImpl cr) { protected void refreshStoreSizeAndTotalBytes() throws IOException { this.storeSize.set(0L); this.totalUncompressedBytes.set(0L); - for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { + for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStoreFiles()) { StoreFileReader r = hsf.getReader(); if (r == null) { LOG.debug("StoreFile {} has a null Reader", hsf); @@ -1762,7 +1763,7 @@ public List recreateScanners(List currentFileS return null; } return getScanners(filesToReopen, cacheBlocks, false, false, matcher, startRow, - includeStartRow, stopRow, includeStopRow, readPt, false); + includeStartRow, stopRow, includeStopRow, readPt, false, false); } finally { this.storeEngine.readUnlock(); } @@ -1784,7 +1785,7 @@ public int getCompactedFilesCount() { } private LongStream getStoreFileAgeStream() { - return this.storeEngine.getStoreFileManager().getStorefiles().stream().filter(sf -> { + return this.storeEngine.getStoreFileManager().getStoreFiles().stream().filter(sf -> { if (sf.getReader() == null) { LOG.debug("StoreFile {} has a null Reader", sf); return false; @@ -1812,13 +1813,13 @@ public OptionalDouble getAvgStoreFileAge() { @Override public long getNumReferenceFiles() { - return this.storeEngine.getStoreFileManager().getStorefiles().stream() + return this.storeEngine.getStoreFileManager().getStoreFiles().stream() .filter(HStoreFile::isReference).count(); } @Override public long getNumHFiles() { - return this.storeEngine.getStoreFileManager().getStorefiles().stream() + return this.storeEngine.getStoreFileManager().getStoreFiles().stream() .filter(HStoreFile::isHFile).count(); } @@ -1830,19 +1831,19 @@ public long getStoreSizeUncompressed() { @Override public long getStorefilesSize() { // Include all StoreFiles - return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), + return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStoreFiles(), sf -> true); } @Override public long getHFilesSize() { // Include only StoreFiles which are HFiles - return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), + return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStoreFiles(), HStoreFile::isHFile); } private long getStorefilesFieldSize(ToLongFunction f) { - return this.storeEngine.getStoreFileManager().getStorefiles().stream() + return this.storeEngine.getStoreFileManager().getStoreFiles().stream() .mapToLong(file -> StoreUtils.getStorefileFieldSize(file, f)).sum(); } @@ -2415,7 +2416,7 @@ public int getCurrentParallelPutCount() { } public int getStoreRefCount() { - return 
this.storeEngine.getStoreFileManager().getStorefiles().stream() + return this.storeEngine.getStoreFileManager().getStoreFiles().stream() .filter(sf -> sf.getReader() != null).filter(HStoreFile::isHFile) .mapToInt(HStoreFile::getRefCount).sum(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index 5df02bfb26a8..b2e222428bac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -125,6 +125,8 @@ public class HStoreFile implements StoreFile { */ public static final byte[] SKIP_RESET_SEQ_ID = Bytes.toBytes("SKIP_RESET_SEQ_ID"); + public static final byte[] HISTORICAL_KEY = Bytes.toBytes("HISTORICAL"); + private final StoreFileInfo fileInfo; // StoreFile.Reader @@ -138,6 +140,16 @@ public class HStoreFile implements StoreFile { // Indicates if the file got compacted private volatile boolean compactedAway = false; + // Indicates if the file contains historical cell versions. This is used when + // hbase.enable.historical.compaction.files is set to true. In that case, compactions + // can generate two files, one with the live cell versions and the other with the remaining + // (historical) cell versions. If isHistorical is true then the hfile is historical. + // Historical files are skipped for regular (not raw) scans for latest row versions. + // When hbase.enable.historical.compaction.files is false, isHistorical will be false + // for all files. This means all files will be treated as live files. Historical files are + // generated only when hbase.enable.historical.compaction.files is true. + private volatile boolean isHistorical = false; + // Keys for metadata stored in backing HFile. // Set when we obtain a Reader. 
private long sequenceid = -1; @@ -337,6 +349,10 @@ public boolean isCompactedAway() { return compactedAway; } + public boolean isHistorical() { + return isHistorical; + } + public int getRefCount() { return fileInfo.getRefCount(); } @@ -455,6 +471,10 @@ private void open() throws IOException { b = metadataMap.get(EXCLUDE_FROM_MINOR_COMPACTION_KEY); this.excludeFromMinorCompaction = (b != null && Bytes.toBoolean(b)); + b = metadataMap.get(HISTORICAL_KEY); + if (b != null) { + isHistorical = Bytes.toBoolean(b); + } BloomType hfileBloomType = initialReader.getBloomFilterType(); if (cfBloomType != BloomType.NONE) { initialReader.loadBloomfilter(BlockType.GENERAL_BLOOM_META, metrics); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java index 48618a6976ce..7bb800a1d39c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -118,6 +118,14 @@ public RegionInfo getRegionInfo() { return regionFileSystem.getRegionInfo(); } + public int getMaxVersions() { + return family.getMaxVersions(); + } + + public boolean getNewVersionBehavior() { + return family.isNewVersionBehavior(); + } + public boolean isPrimaryReplicaStore() { return getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index 34f882516bae..fbf9a4ffb135 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -360,7 +360,7 @@ public void refreshStoreFiles(Collection newFiles) throws IOException { * replicas to keep up to date with the primary region files. */ private void refreshStoreFilesInternal(Collection newFiles) throws IOException { - Collection currentFiles = storeFileManager.getStorefiles(); + Collection currentFiles = storeFileManager.getStoreFiles(); Collection compactedFiles = storeFileManager.getCompactedfiles(); if (currentFiles == null) { currentFiles = Collections.emptySet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index 387fa559dcd3..86a14047f138 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -50,7 +50,7 @@ public interface StoreFileManager { */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") - void loadFiles(List storeFiles); + void loadFiles(List storeFiles) throws IOException; /** * Adds new files, either for from MemStore flush or bulk insert, into the structure. @@ -58,7 +58,7 @@ public interface StoreFileManager { */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") - void insertNewFiles(Collection sfs); + void insertNewFiles(Collection sfs) throws IOException; /** * Adds only the new compaction results into the structure. 
@@ -67,7 +67,8 @@ public interface StoreFileManager { */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") - void addCompactionResults(Collection compactedFiles, Collection results); + void addCompactionResults(Collection compactedFiles, Collection results) + throws IOException; /** * Remove the compacted files @@ -95,7 +96,7 @@ public interface StoreFileManager { * checks; should not assume anything about relations between store files in the list. * @return The list of StoreFiles. */ - Collection getStorefiles(); + Collection getStoreFiles(); /** * List of compacted files inside this store that needs to be excluded in reads because further @@ -119,12 +120,13 @@ public interface StoreFileManager { /** * Gets the store files to scan for a Scan or Get request. - * @param startRow Start row of the request. - * @param stopRow Stop row of the request. + * @param startRow Start row of the request. + * @param stopRow Stop row of the request. + * @param onlyLatestVersion Scan only latest live version cells. * @return The list of files that are to be read for this request. */ Collection getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow, - boolean includeStopRow); + boolean includeStopRow, boolean onlyLatestVersion); /** * Gets initial, full list of candidate store files to check for row-key-before. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 17e0001fb0cc..67fa2244e957 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -17,22 +17,27 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_PARAM_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_TYPE_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.COMPACTION_EVENT_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.DELETE_FAMILY_COUNT; import static org.apache.hadoop.hbase.regionserver.HStoreFile.EARLIEST_PUT_TS; +import static org.apache.hadoop.hbase.regionserver.HStoreFile.HISTORICAL_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAX_SEQ_ID_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MOB_CELLS_COUNT; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MOB_FILE_REFS; import static org.apache.hadoop.hbase.regionserver.HStoreFile.TIMERANGE_KEY; +import static org.apache.hadoop.hbase.regionserver.StoreEngine.STORE_ENGINE_CLASS_KEY; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.UUID; import java.util.function.Consumer; @@ -43,6 +48,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; 
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -53,6 +60,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.util.BloomContext; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.BloomFilterUtil; @@ -68,6 +76,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.SetMultimap; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -79,24 +88,42 @@ @InterfaceAudience.Private public class StoreFileWriter implements CellSink, ShipperListener { private static final Logger LOG = LoggerFactory.getLogger(StoreFileWriter.class.getName()); + public static final String ENABLE_HISTORICAL_COMPACTION_FILES = + "hbase.enable.historical.compaction.files"; + public static final boolean DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES = false; private static final Pattern dash = Pattern.compile("-"); - private final BloomFilterWriter generalBloomFilterWriter; - private final BloomFilterWriter deleteFamilyBloomFilterWriter; + private SingleStoreFileWriter liveFileWriter; + private SingleStoreFileWriter historicalFileWriter; + private final FileSystem fs; + private final Path historicalFilePath; + private final Configuration conf; + private final CacheConfig cacheConf; private final BloomType bloomType; - private byte[] bloomParam = null; - private long earliestPutTs = HConstants.LATEST_TIMESTAMP; - private long deleteFamilyCnt = 0; - private BloomContext bloomContext = null; - private BloomContext deleteFamilyBloomContext = null; - private final TimeRangeTracker timeRangeTracker; + private final long maxKeys; + private final InetSocketAddress[] favoredNodes; + private final HFileContext fileContext; + private final boolean shouldDropCacheBehind; private final Supplier> compactedFilesSupplier; - - protected HFile.Writer writer; + private final CellComparator comparator; + private Cell lastCell; + // The first (latest) delete family marker of the current row + private Cell deleteFamily; + // The list of delete family version markers of the current row + private List deleteFamilyVersionList = new ArrayList<>(); + // The first (latest) delete column marker of the current column + private Cell deleteColumn; + // The list of delete column version markers of the current column + private List deleteColumnVersionList = new ArrayList<>(); + // The live put cell count for the current column + private int livePutCellCount; + private final int maxVersions; + private final boolean newVersionBehavior; /** * Creates an HFile.Writer that also write helpful meta data. * @param fs file system to write to - * @param path file name to create + * @param liveFilePath the name of the live file to create + * @param historicalFilePath the name of the historical file name to create * @param conf user configuration * @param bloomType bloom filter setting * @param maxKeys the expected maximum number of keys to be added. 
Was used for @@ -105,72 +132,61 @@ public class StoreFileWriter implements CellSink, ShipperListener { * @param fileContext The HFile context * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. * @param compactedFilesSupplier Returns the {@link HStore} compacted files which not archived + * @param comparator Cell comparator + * @param maxVersions max cell versions + * @param newVersionBehavior enable new version behavior * @throws IOException problem writing to FS */ - private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, - BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, - boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) - throws IOException { + private StoreFileWriter(FileSystem fs, Path liveFilePath, Path historicalFilePath, + final Configuration conf, CacheConfig cacheConf, BloomType bloomType, long maxKeys, + InetSocketAddress[] favoredNodes, HFileContext fileContext, boolean shouldDropCacheBehind, + Supplier> compactedFilesSupplier, CellComparator comparator, + int maxVersions, boolean newVersionBehavior) throws IOException { + this.fs = fs; + this.historicalFilePath = historicalFilePath; + this.conf = conf; + this.cacheConf = cacheConf; + this.bloomType = bloomType; + this.maxKeys = maxKeys; + this.favoredNodes = favoredNodes; + this.fileContext = fileContext; + this.shouldDropCacheBehind = shouldDropCacheBehind; this.compactedFilesSupplier = compactedFilesSupplier; - this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); - // TODO : Change all writers to be specifically created for compaction context - writer = - HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes) - .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create(); - - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, - bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + this.comparator = comparator; + this.maxVersions = maxVersions; + this.newVersionBehavior = newVersionBehavior; + liveFileWriter = new SingleStoreFileWriter(fs, liveFilePath, conf, cacheConf, bloomType, + maxKeys, favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); + } - if (generalBloomFilterWriter != null) { - this.bloomType = bloomType; - this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf); - if (LOG.isTraceEnabled()) { - LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: " - + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH - ? Bytes.toInt(bloomParam) - : Bytes.toStringBinary(bloomParam)) - + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + public static boolean shouldEnableHistoricalCompactionFiles(Configuration conf) { + if ( + conf.getBoolean(ENABLE_HISTORICAL_COMPACTION_FILES, + DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES) + ) { + // Historical compaction files are supported only for default store engine with + // default compactor. + String storeEngine = conf.get(STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName()); + if (!storeEngine.equals(DefaultStoreEngine.class.getName())) { + LOG.warn("Historical compaction file generation is ignored for " + storeEngine + + ". 
hbase.enable.historical.compaction.files can be set to true only for the " + + "default compaction (DefaultStoreEngine and DefaultCompactor)"); + return false; } - // init bloom context - switch (bloomType) { - case ROW: - bloomContext = - new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); - break; - case ROWCOL: - bloomContext = - new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); - break; - case ROWPREFIX_FIXED_LENGTH: - bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter, - fileContext.getCellComparator(), Bytes.toInt(bloomParam)); - break; - default: - throw new IOException( - "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); + String compactor = conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DefaultCompactor.class.getName()); + if (!compactor.equals(DefaultCompactor.class.getName())) { + LOG.warn("Historical compaction file generation is ignored for " + compactor + + ". hbase.enable.historical.compaction.files can be set to true only for the " + + "default compaction (DefaultStoreEngine and DefaultCompactor)"); + return false; } - } else { - // Not using Bloom filters. - this.bloomType = BloomType.NONE; - } - - // initialize delete family Bloom filter when there is NO RowCol Bloom filter - if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, - cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); - deleteFamilyBloomContext = - new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); - } else { - deleteFamilyBloomFilterWriter = null; - } - if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) { - LOG.trace("Delete Family Bloom filter type for " + path + ": " - + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + return true; } + return false; } public long getPos() throws IOException { - return ((HFileWriterImpl) writer).getPos(); + return liveFileWriter.getPos(); } /** @@ -181,7 +197,10 @@ public long getPos() throws IOException { */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) throws IOException { - appendMetadata(maxSequenceId, majorCompaction, Collections.emptySet()); + liveFileWriter.appendMetadata(maxSequenceId, majorCompaction); + if (historicalFileWriter != null) { + historicalFileWriter.appendMetadata(maxSequenceId, majorCompaction); + } } /** @@ -193,37 +212,10 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, final Collection storeFiles) throws IOException { - writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); - writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); - writer.appendFileInfo(COMPACTION_EVENT_KEY, toCompactionEventTrackerBytes(storeFiles)); - appendTrackedTimestampsToMetadata(); - } - - /** - * Used when write {@link HStoreFile#COMPACTION_EVENT_KEY} to new file's file info. The compacted - * store files's name is needed. But if the compacted store file is a result of compaction, it's - * compacted files which still not archived is needed, too. And don't need to add compacted files - * recursively. If file A, B, C compacted to new file D, and file D compacted to new file E, will - * write A, B, C, D to file E's compacted files. 
So if file E compacted to new file F, will add E - * to F's compacted files first, then add E's compacted files: A, B, C, D to it. And no need to - * add D's compacted file, as D's compacted files has been in E's compacted files, too. See - * HBASE-20724 for more details. - * @param storeFiles The compacted store files to generate this new file - * @return bytes of CompactionEventTracker - */ - private byte[] toCompactionEventTrackerBytes(Collection storeFiles) { - Set notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() - .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); - Set compactedStoreFiles = new HashSet<>(); - for (HStoreFile storeFile : storeFiles) { - compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); - for (String csf : storeFile.getCompactedStoreFiles()) { - if (notArchivedCompactedStoreFiles.contains(csf)) { - compactedStoreFiles.add(csf); - } - } + liveFileWriter.appendMetadata(maxSequenceId, majorCompaction, storeFiles); + if (historicalFileWriter != null) { + historicalFileWriter.appendMetadata(maxSequenceId, majorCompaction, storeFiles); } - return ProtobufUtil.toCompactionEventTrackerBytes(compactedStoreFiles); } /** @@ -235,10 +227,10 @@ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) */ public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, final long mobCellsCount) throws IOException { - writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); - writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); - writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount)); - appendTrackedTimestampsToMetadata(); + liveFileWriter.appendMetadata(maxSequenceId, majorCompaction, mobCellsCount); + if (historicalFileWriter != null) { + historicalFileWriter.appendMetadata(maxSequenceId, majorCompaction, mobCellsCount); + } } /** @@ -247,7 +239,10 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti * @throws IOException problem writing to FS */ public void appendMobMetadata(SetMultimap mobRefSet) throws IOException { - writer.appendFileInfo(MOB_FILE_REFS, MobUtils.serializeMobFileRefs(mobRefSet)); + liveFileWriter.appendMobMetadata(mobRefSet); + if (historicalFileWriter != null) { + historicalFileWriter.appendMobMetadata(mobRefSet); + } } /** @@ -256,156 +251,560 @@ public void appendMobMetadata(SetMultimap mobRefSet) throws I public void appendTrackedTimestampsToMetadata() throws IOException { // TODO: The StoreFileReader always converts the byte[] to TimeRange // via TimeRangeTracker, so we should write the serialization data of TimeRange directly. - appendFileInfo(TIMERANGE_KEY, TimeRangeTracker.toByteArray(timeRangeTracker)); - appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); + liveFileWriter.appendTrackedTimestampsToMetadata(); + if (historicalFileWriter != null) { + historicalFileWriter.appendTrackedTimestampsToMetadata(); + } } - /** - * Record the earlest Put timestamp. 
If the timeRangeTracker is not set, update TimeRangeTracker - * to include the timestamp of this key - */ - public void trackTimestamps(final Cell cell) { - if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { - earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); + @Override + public void beforeShipped() throws IOException { + liveFileWriter.beforeShipped(); + if (historicalFileWriter != null) { + historicalFileWriter.beforeShipped(); } - timeRangeTracker.includeTimestamp(cell); } - private void appendGeneralBloomfilter(final Cell cell) throws IOException { - if (this.generalBloomFilterWriter != null) { - /* - * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp 3 Types of - * Filtering: 1. Row = Row 2. RowCol = Row + Qualifier 3. RowPrefixFixedLength = Fixed Length - * Row Prefix - */ - bloomContext.writeBloom(cell); + public Path getPath() { + return liveFileWriter.getPath(); + } + + public List getPaths() { + if (historicalFileWriter == null) { + return Lists.newArrayList(liveFileWriter.getPath()); } + return Lists.newArrayList(liveFileWriter.getPath(), historicalFileWriter.getPath()); } - private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { - if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { - return; + public boolean hasGeneralBloom() { + return liveFileWriter.hasGeneralBloom(); + } + + /** + * For unit testing only. + * @return the Bloom filter used by this writer. + */ + BloomFilterWriter getGeneralBloomWriter() { + return liveFileWriter.generalBloomFilterWriter; + } + + public void close() throws IOException { + liveFileWriter.appendFileInfo(HISTORICAL_KEY, Bytes.toBytes(false)); + liveFileWriter.close(); + if (historicalFileWriter != null) { + historicalFileWriter.appendFileInfo(HISTORICAL_KEY, Bytes.toBytes(true)); + historicalFileWriter.close(); } + } - // increase the number of delete family in the store file - deleteFamilyCnt++; - if (this.deleteFamilyBloomFilterWriter != null) { - deleteFamilyBloomContext.writeBloom(cell); + public void appendFileInfo(byte[] key, byte[] value) throws IOException { + liveFileWriter.appendFileInfo(key, value); + if (historicalFileWriter != null) { + historicalFileWriter.appendFileInfo(key, value); } } - @Override - public void append(final Cell cell) throws IOException { - appendGeneralBloomfilter(cell); - appendDeleteFamilyBloomFilter(cell); - writer.append(cell); - trackTimestamps(cell); + /** + * For use in testing. + */ + HFile.Writer getLiveFileWriter() { + return liveFileWriter.getHFileWriter(); } - @Override - public void beforeShipped() throws IOException { - // For now these writer will always be of type ShipperListener true. - // TODO : Change all writers to be specifically created for compaction context - writer.beforeShipped(); - if (generalBloomFilterWriter != null) { - generalBloomFilterWriter.beforeShipped(); + /** + * @param dir Directory to create file in. 
+ * @return random filename inside passed dir + */ + public static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOException { + if (!fs.getFileStatus(dir).isDirectory()) { + throw new IOException("Expecting " + dir.toString() + " to be a directory"); } - if (deleteFamilyBloomFilterWriter != null) { - deleteFamilyBloomFilterWriter.beforeShipped(); + return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll("")); + } + + private SingleStoreFileWriter getHistoricalFileWriter() throws IOException { + if (historicalFileWriter == null) { + historicalFileWriter = + new SingleStoreFileWriter(fs, historicalFilePath, conf, cacheConf, bloomType, maxKeys, + favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); } + return historicalFileWriter; } - public Path getPath() { - return this.writer.getPath(); + private void initRowState() { + deleteFamily = null; + deleteFamilyVersionList.clear(); + lastCell = null; } - public boolean hasGeneralBloom() { - return this.generalBloomFilterWriter != null; + private void initColumnState() { + livePutCellCount = 0; + deleteColumn = null; + deleteColumnVersionList.clear(); + } - /** - * For unit testing only. - * @return the Bloom filter used by this writer. - */ - BloomFilterWriter getGeneralBloomWriter() { - return generalBloomFilterWriter; + private boolean isDeletedByDeleteFamily(Cell cell) { + return deleteFamily != null && (deleteFamily.getTimestamp() > cell.getTimestamp() + || (deleteFamily.getTimestamp() == cell.getTimestamp() + && (!newVersionBehavior || cell.getSequenceId() < deleteFamily.getSequenceId()))); } - private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { - boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); - if (haveBloom) { - bfw.compactBloom(); + private boolean isDeletedByDeleteFamilyVersion(Cell cell) { + for (Cell deleteFamilyVersion : deleteFamilyVersionList) { + if ( + deleteFamilyVersion.getTimestamp() == cell.getTimestamp() + && (!newVersionBehavior || cell.getSequenceId() < deleteFamilyVersion.getSequenceId()) + ) { + return true; + } } - return haveBloom; + return false; } - private boolean closeGeneralBloomFilter() throws IOException { - boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); + private boolean isDeletedByDeleteColumn(Cell cell) { + return deleteColumn != null && (deleteColumn.getTimestamp() > cell.getTimestamp() + || (deleteColumn.getTimestamp() == cell.getTimestamp() + && (!newVersionBehavior || cell.getSequenceId() < deleteColumn.getSequenceId()))); + } - // add the general Bloom filter writer and append file info - if (hasGeneralBloom) { - writer.addGeneralBloomFilter(generalBloomFilterWriter); - writer.appendFileInfo(BLOOM_FILTER_TYPE_KEY, Bytes.toBytes(bloomType.toString())); - if (bloomParam != null) { - writer.appendFileInfo(BLOOM_FILTER_PARAM_KEY, bloomParam); + private boolean isDeletedByDeleteColumnVersion(Cell cell) { + for (Cell deleteColumnVersion : deleteColumnVersionList) { + if ( + deleteColumnVersion.getTimestamp() == cell.getTimestamp() + && (!newVersionBehavior || cell.getSequenceId() < deleteColumnVersion.getSequenceId()) + ) { + return true; } - bloomContext.addLastBloomKey(writer); } - return hasGeneralBloom; + return false; } - private boolean closeDeleteFamilyBloomFilter() throws IOException { - boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); + private boolean isDeleted(Cell cell) { + return isDeletedByDeleteFamily(cell) || isDeletedByDeleteColumn(cell) + || 
isDeletedByDeleteFamilyVersion(cell) || isDeletedByDeleteColumnVersion(cell); + } - // add the delete family Bloom filter writer - if (hasDeleteFamilyBloom) { - writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); + private void appendCell(Cell cell) throws IOException { + if ((lastCell == null || !CellUtil.matchingColumn(lastCell, cell))) { + initColumnState(); } + if (cell.getType() == Cell.Type.DeleteFamily) { + if (deleteFamily == null) { + deleteFamily = cell; + liveFileWriter.append(cell); + } else { + getHistoricalFileWriter().append(cell); + } + } else if (cell.getType() == Cell.Type.DeleteFamilyVersion) { + if (!isDeletedByDeleteFamily(cell)) { + deleteFamilyVersionList.add(cell); + if (deleteFamily != null && deleteFamily.getTimestamp() == cell.getTimestamp()) { + // This means both the delete-family and delete-family-version markers have the same + // timestamp but the sequence id of delete-family-version marker is higher than that of + // the delete-family marker. In this case, there is no need to add the + // delete-family-version marker to the live version file. This case happens only with + // the new version behavior. + liveFileWriter.append(cell); + } else { + liveFileWriter.append(cell); + } + } else { + getHistoricalFileWriter().append(cell); + } + } else if (cell.getType() == Cell.Type.DeleteColumn) { + if (!isDeletedByDeleteFamily(cell) && deleteColumn == null) { + deleteColumn = cell; + liveFileWriter.append(cell); + } else { + getHistoricalFileWriter().append(cell); + } + } else if (cell.getType() == Cell.Type.Delete) { + if (!isDeletedByDeleteFamily(cell) && deleteColumn == null) { + deleteColumnVersionList.add(cell); + if (deleteFamily != null && deleteFamily.getTimestamp() == cell.getTimestamp()) { + // This means both the delete-family and delete-column-version markers have the same + // timestamp but the sequence id of delete-column-version marker is higher than that of + // the delete-family marker. In this case, there is no need to add the + // delete-column-version marker to the live version file. This case happens only with + // the new version behavior. + getHistoricalFileWriter().append(cell); + } else { + liveFileWriter.append(cell); + } + } else { + getHistoricalFileWriter().append(cell); + } + } else if (cell.getType() == Cell.Type.Put) { + if (livePutCellCount < maxVersions) { + // This is a live put cell (i.e., the latest version) of a column. Is it deleted? + if (!isDeleted(cell)) { + liveFileWriter.append(cell); + livePutCellCount++; + } else { + // It is deleted + getHistoricalFileWriter().append(cell); + if (newVersionBehavior) { + // Deleted versions are considered toward total version count when newVersionBehavior + livePutCellCount++; + } + } + } else { + // It is an older put cell + getHistoricalFileWriter().append(cell); + } + } + lastCell = cell; + } - // append file info about the number of delete family kvs - // even if there is no delete family Bloom. - writer.appendFileInfo(DELETE_FAMILY_COUNT, Bytes.toBytes(this.deleteFamilyCnt)); + @Override + public void appendAll(List cellList) throws IOException { + if (historicalFilePath == null) { + // The dual writing is not enabled and all cells are written to one file. 
We use + // the live version file in this case + for (Cell cell : cellList) { + liveFileWriter.append(cell); + } + return; + } + if (cellList.isEmpty()) { + return; + } + if (lastCell != null && comparator.compareRows(lastCell, cellList.get(0)) != 0) { + // It is a new row and thus time to reset the state + initRowState(); + } + for (Cell cell : cellList) { + appendCell(cell); + } + } - return hasDeleteFamilyBloom; + @Override + public void append(Cell cell) throws IOException { + if (historicalFilePath == null) { + // The dual writing is not enabled and all cells are written to one file. We use + // the live version file in this case + liveFileWriter.append(cell); + return; + } + appendCell(cell); } - public void close() throws IOException { - boolean hasGeneralBloom = this.closeGeneralBloomFilter(); - boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); + private static class SingleStoreFileWriter { + private final BloomFilterWriter generalBloomFilterWriter; + private final BloomFilterWriter deleteFamilyBloomFilterWriter; + private final BloomType bloomType; + private byte[] bloomParam = null; + private long earliestPutTs = HConstants.LATEST_TIMESTAMP; + private long deleteFamilyCnt = 0; + private BloomContext bloomContext = null; + private BloomContext deleteFamilyBloomContext = null; + private final TimeRangeTracker timeRangeTracker; + private final Supplier> compactedFilesSupplier; + + private HFile.Writer writer; - writer.close(); + /** + * Creates an HFile.Writer that also write helpful meta data. + * @param fs file system to write to + * @param path file name to create + * @param conf user configuration + * @param bloomType bloom filter setting + * @param maxKeys the expected maximum number of keys to be added. Was used for + * Bloom filter size in {@link HFile} format version 1. + * @param favoredNodes an array of favored nodes or possibly null + * @param fileContext The HFile context + * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. + * @param compactedFilesSupplier Returns the {@link HStore} compacted files which not archived + * @throws IOException problem writing to FS + */ + private SingleStoreFileWriter(FileSystem fs, Path path, final Configuration conf, + CacheConfig cacheConf, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, + HFileContext fileContext, boolean shouldDropCacheBehind, + Supplier> compactedFilesSupplier) throws IOException { + this.compactedFilesSupplier = compactedFilesSupplier; + this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); + // TODO : Change all writers to be specifically created for compaction context + writer = + HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes) + .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create(); + + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, + bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + + if (generalBloomFilterWriter != null) { + this.bloomType = bloomType; + this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf); + if (LOG.isTraceEnabled()) { + LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: " + + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH + ? 
Bytes.toInt(bloomParam) + : Bytes.toStringBinary(bloomParam)) + + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + } + // init bloom context + switch (bloomType) { + case ROW: + bloomContext = + new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); + break; + case ROWCOL: + bloomContext = + new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); + break; + case ROWPREFIX_FIXED_LENGTH: + bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter, + fileContext.getCellComparator(), Bytes.toInt(bloomParam)); + break; + default: + throw new IOException( + "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)"); + } + } else { + // Not using Bloom filters. + this.bloomType = BloomType.NONE; + } - // Log final Bloom filter statistics. This needs to be done after close() - // because compound Bloom filters might be finalized as part of closing. - if (LOG.isTraceEnabled()) { - LOG.trace( - (hasGeneralBloom ? "" : "NO ") + "General Bloom and " + (hasDeleteFamilyBloom ? "" : "NO ") - + "DeleteFamily" + " was added to HFile " + getPath()); + // initialize delete family Bloom filter when there is NO RowCol Bloom filter + if (this.bloomType != BloomType.ROWCOL) { + this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, + cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + deleteFamilyBloomContext = + new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); + } else { + deleteFamilyBloomFilterWriter = null; + } + if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) { + LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + } } - } + private long getPos() throws IOException { + return ((HFileWriterImpl) writer).getPos(); + } - public void appendFileInfo(byte[] key, byte[] value) throws IOException { - writer.appendFileInfo(key, value); - } + /** + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. + * @param majorCompaction True if this file is product of a major compaction + * @throws IOException problem writing to FS + */ + private void appendMetadata(final long maxSequenceId, final boolean majorCompaction) + throws IOException { + appendMetadata(maxSequenceId, majorCompaction, Collections.emptySet()); + } - /** - * For use in testing. - */ - HFile.Writer getHFileWriter() { - return writer; - } + /** + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. + * @param majorCompaction True if this file is product of a major compaction + * @param storeFiles The compacted store files to generate this new file + * @throws IOException problem writing to FS + */ + private void appendMetadata(final long maxSequenceId, final boolean majorCompaction, + final Collection storeFiles) throws IOException { + writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); + writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); + writer.appendFileInfo(COMPACTION_EVENT_KEY, toCompactionEventTrackerBytes(storeFiles)); + appendTrackedTimestampsToMetadata(); + } - /** - * @param dir Directory to create file in. 
- * @return random filename inside passed dir - */ - public static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOException { - if (!fs.getFileStatus(dir).isDirectory()) { - throw new IOException("Expecting " + dir.toString() + " to be a directory"); + /** + * Used when write {@link HStoreFile#COMPACTION_EVENT_KEY} to new file's file info. The + * compacted store files's name is needed. But if the compacted store file is a result of + * compaction, it's compacted files which still not archived is needed, too. And don't need to + * add compacted files recursively. If file A, B, C compacted to new file D, and file D + * compacted to new file E, will write A, B, C, D to file E's compacted files. So if file E + * compacted to new file F, will add E to F's compacted files first, then add E's compacted + * files: A, B, C, D to it. And no need to add D's compacted file, as D's compacted files has + * been in E's compacted files, too. See HBASE-20724 for more details. + * @param storeFiles The compacted store files to generate this new file + * @return bytes of CompactionEventTracker + */ + private byte[] toCompactionEventTrackerBytes(Collection storeFiles) { + Set notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() + .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); + Set compactedStoreFiles = new HashSet<>(); + for (HStoreFile storeFile : storeFiles) { + compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); + for (String csf : storeFile.getCompactedStoreFiles()) { + if (notArchivedCompactedStoreFiles.contains(csf)) { + compactedStoreFiles.add(csf); + } + } + } + return ProtobufUtil.toCompactionEventTrackerBytes(compactedStoreFiles); + } + + /** + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. + * @param majorCompaction True if this file is product of a major compaction + * @param mobCellsCount The number of mob cells. + * @throws IOException problem writing to FS + */ + private void appendMetadata(final long maxSequenceId, final boolean majorCompaction, + final long mobCellsCount) throws IOException { + writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); + writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction)); + writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount)); + appendTrackedTimestampsToMetadata(); + } + + /** + * Appends MOB - specific metadata (even if it is empty) + * @param mobRefSet - original table -> set of MOB file names + * @throws IOException problem writing to FS + */ + private void appendMobMetadata(SetMultimap mobRefSet) throws IOException { + writer.appendFileInfo(MOB_FILE_REFS, MobUtils.serializeMobFileRefs(mobRefSet)); + } + + /** + * Add TimestampRange and earliest put timestamp to Metadata + */ + private void appendTrackedTimestampsToMetadata() throws IOException { + // TODO: The StoreFileReader always converts the byte[] to TimeRange + // via TimeRangeTracker, so we should write the serialization data of TimeRange directly. + appendFileInfo(TIMERANGE_KEY, TimeRangeTracker.toByteArray(timeRangeTracker)); + appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); + } + + /** + * Record the earlest Put timestamp. 
If the timeRangeTracker is not set, update TimeRangeTracker + * to include the timestamp of this key + */ + private void trackTimestamps(final Cell cell) { + if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { + earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); + } + timeRangeTracker.includeTimestamp(cell); + } + + private void appendGeneralBloomfilter(final Cell cell) throws IOException { + if (this.generalBloomFilterWriter != null) { + /* + * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue. + * png Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp 3 Types of + * Filtering: 1. Row = Row 2. RowCol = Row + Qualifier 3. RowPrefixFixedLength = Fixed + * Length Row Prefix + */ + bloomContext.writeBloom(cell); + } + } + + private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { + if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { + return; + } + + // increase the number of delete family in the store file + deleteFamilyCnt++; + if (this.deleteFamilyBloomFilterWriter != null) { + deleteFamilyBloomContext.writeBloom(cell); + } + } + + private void append(final Cell cell) throws IOException { + appendGeneralBloomfilter(cell); + appendDeleteFamilyBloomFilter(cell); + writer.append(cell); + trackTimestamps(cell); + } + + private void beforeShipped() throws IOException { + // For now these writer will always be of type ShipperListener true. + // TODO : Change all writers to be specifically created for compaction context + writer.beforeShipped(); + if (generalBloomFilterWriter != null) { + generalBloomFilterWriter.beforeShipped(); + } + if (deleteFamilyBloomFilterWriter != null) { + deleteFamilyBloomFilterWriter.beforeShipped(); + } + } + + private Path getPath() { + return this.writer.getPath(); + } + + private boolean hasGeneralBloom() { + return this.generalBloomFilterWriter != null; + } + + /** + * For unit testing only. + * @return the Bloom filter used by this writer. + */ + BloomFilterWriter getGeneralBloomWriter() { + return generalBloomFilterWriter; + } + + private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { + boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); + if (haveBloom) { + bfw.compactBloom(); + } + return haveBloom; + } + + private boolean closeGeneralBloomFilter() throws IOException { + boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); + + // add the general Bloom filter writer and append file info + if (hasGeneralBloom) { + writer.addGeneralBloomFilter(generalBloomFilterWriter); + writer.appendFileInfo(BLOOM_FILTER_TYPE_KEY, Bytes.toBytes(bloomType.toString())); + if (bloomParam != null) { + writer.appendFileInfo(BLOOM_FILTER_PARAM_KEY, bloomParam); + } + bloomContext.addLastBloomKey(writer); + } + return hasGeneralBloom; + } + + private boolean closeDeleteFamilyBloomFilter() throws IOException { + boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); + + // add the delete family Bloom filter writer + if (hasDeleteFamilyBloom) { + writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); + } + + // append file info about the number of delete family kvs + // even if there is no delete family Bloom. 
+ writer.appendFileInfo(DELETE_FAMILY_COUNT, Bytes.toBytes(this.deleteFamilyCnt)); + + return hasDeleteFamilyBloom; + } + + private void close() throws IOException { + boolean hasGeneralBloom = this.closeGeneralBloomFilter(); + boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); + + writer.close(); + + // Log final Bloom filter statistics. This needs to be done after close() + // because compound Bloom filters might be finalized as part of closing. + if (LOG.isTraceEnabled()) { + LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + + (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + + getPath()); + } + + } + + private void appendFileInfo(byte[] key, byte[] value) throws IOException { + writer.appendFileInfo(key, value); + } + + /** + * For use in testing. + */ + private HFile.Writer getHFileWriter() { + return writer; } - return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll("")); } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", @@ -418,7 +817,9 @@ public static class Builder { private BloomType bloomType = BloomType.NONE; private long maxKeyCount = 0; private Path dir; - private Path filePath; + private Path liveFilePath; + private Path historicalFilePath; + private InetSocketAddress[] favoredNodes; private HFileContext fileContext; private boolean shouldDropCacheBehind; @@ -430,6 +831,10 @@ public static class Builder { // store files which are not recorded in the SFT, but for the newly created store file writer, // they are not tracked in SFT, so here we need to record them and treat them specially. private Consumer writerCreationTracker; + private int maxVersions; + private boolean newVersionBehavior; + private CellComparator comparator; + private boolean isCompaction; public Builder(Configuration conf, CacheConfig cacheConf, FileSystem fs) { this.conf = conf; @@ -465,7 +870,7 @@ public Builder withOutputDir(Path dir) { */ public Builder withFilePath(Path filePath) { Preconditions.checkNotNull(filePath); - this.filePath = filePath; + this.liveFilePath = filePath; return this; } @@ -519,17 +924,37 @@ public Builder withWriterCreationTracker(Consumer writerCreationTracker) { return this; } + public Builder withMaxVersions(int maxVersions) { + this.maxVersions = maxVersions; + return this; + } + + public Builder withNewVersionBehavior(boolean newVersionBehavior) { + this.newVersionBehavior = newVersionBehavior; + return this; + } + + public Builder withCellComparator(CellComparator comparator) { + this.comparator = comparator; + return this; + } + + public Builder withIsCompaction(boolean isCompaction) { + this.isCompaction = isCompaction; + return this; + } + /** * Create a store file writer. Client is responsible for closing file when done. If metadata, * add BEFORE closing using {@link StoreFileWriter#appendMetadata}. */ public StoreFileWriter build() throws IOException { - if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { + if ((dir == null ? 0 : 1) + (liveFilePath == null ? 
0 : 1) != 1) { throw new IllegalArgumentException("Either specify parent directory " + "or file path"); } if (dir == null) { - dir = filePath.getParent(); + dir = liveFilePath.getParent(); } if (!fs.exists(dir)) { @@ -545,7 +970,7 @@ public StoreFileWriter build() throws IOException { } CommonFSUtils.setStoragePolicy(this.fs, dir, policyName); - if (filePath == null) { + if (liveFilePath == null) { // The stored file and related blocks will used the directory based StoragePolicy. // Because HDFS DistributedFileSystem does not support create files with storage policy // before version 3.3.0 (See HDFS-13209). Use child dir here is to make stored files @@ -560,21 +985,30 @@ public StoreFileWriter build() throws IOException { } CommonFSUtils.setStoragePolicy(this.fs, dir, fileStoragePolicy); } - filePath = getUniqueFile(fs, dir); + liveFilePath = getUniqueFile(fs, dir); if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) { bloomType = BloomType.NONE; } } + + if (isCompaction && shouldEnableHistoricalCompactionFiles(conf)) { + historicalFilePath = getUniqueFile(fs, dir); + } + // make sure we call this before actually create the writer // in fact, it is not a big deal to even add an inexistent file to the track, as we will never // try to delete it and finally we will clean the tracker up after compaction. But if the file // cleaner find the file but we haven't recorded it yet, it may accidentally delete the file // and cause problem. if (writerCreationTracker != null) { - writerCreationTracker.accept(filePath); + writerCreationTracker.accept(liveFilePath); + if (historicalFilePath != null) { + writerCreationTracker.accept(historicalFilePath); + } } - return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount, - favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier); + return new StoreFileWriter(fs, liveFilePath, historicalFilePath, conf, cacheConf, bloomType, + maxKeyCount, favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier, + comparator, maxVersions, newVersionBehavior); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index c12307841a2b..89d4aa34e78c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -223,6 +224,12 @@ private void addCurrentScanners(List scanners) { this.currentScanners.addAll(scanners); } + private static boolean isOnlyLatestVersionScan(Scan scan) { + // No need to check for Scan#getMaxVersions because live version files generated by store file + // writer retains max versions specified in ColumnFamilyDescriptor for the given CF + return !scan.isRaw() && scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP; + } + /** * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a * compaction. 
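A minimal, self-contained sketch of the file-selection rule introduced above in StoreScanner#isOnlyLatestVersionScan: only non-raw scans with no upper time bound can be served from the live files alone, while raw or time-bounded scans must also read the historical files. The class name and the example scans here are illustrative assumptions and are not part of this patch; the check itself mirrors the method shown in the hunk above.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class OnlyLatestVersionScanSketch {
  // Same condition as isOnlyLatestVersionScan(Scan) above: raw scans and scans with an
  // explicit upper time bound may need older cell versions, so they cannot skip the
  // historical compaction files.
  static boolean onlyLatestVersion(Scan scan) {
    return !scan.isRaw() && scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP;
  }

  public static void main(String[] args) throws Exception {
    Scan latest = new Scan().addFamily(Bytes.toBytes("f"));   // latest versions only
    Scan bounded = new Scan().setTimeRange(0L, 1000L);        // explicit upper time bound
    Scan raw = new Scan().setRaw(true);                       // raw scan sees delete markers
    System.out.println(onlyLatestVersion(latest));            // true  -> live files suffice
    System.out.println(onlyLatestVersion(bounded));           // false -> include historical files
    System.out.println(onlyLatestVersion(raw));               // false -> include historical files
  }
}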
@@ -247,7 +254,8 @@ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet sfs, List memStoreSc // Eagerly creating scanners so that we have the ref counting ticking on the newly created // store files. In case of stream scanners this eager creation does not induce performance // penalty because in scans (that uses stream scanners) the next() call is bound to happen. - List scanners = store.getScanners(sfs, cacheBlocks, get, usePread, - isCompaction, matcher, scan.getStartRow(), scan.getStopRow(), this.readPt, false); + List scanners = + store.getScanners(sfs, cacheBlocks, get, usePread, isCompaction, matcher, + scan.getStartRow(), scan.getStopRow(), this.readPt, false, isOnlyLatestVersionScan(scan)); flushedstoreFileScanners.addAll(scanners); if (!CollectionUtils.isEmpty(memStoreScanners)) { clearAndClose(memStoreScannersAfterFlush); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 85f61a029ad9..8ac8397b868c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -136,7 +136,7 @@ public void loadFiles(List storeFiles) { } @Override - public Collection getStorefiles() { + public Collection getStoreFiles() { return state.allFilesCached; } @@ -300,7 +300,7 @@ private double getMidStripeSplitRatio(long smallerSize, long largerSize, long la @Override public Collection getFilesForScan(byte[] startRow, boolean includeStartRow, - byte[] stopRow, boolean includeStopRow) { + byte[] stopRow, boolean includeStopRow, boolean onlyLatestVersion) { if (state.stripeFiles.isEmpty()) { return state.level0Files; // There's just L0. } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index 251c8227da00..538efecb4018 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import static org.apache.hadoop.hbase.regionserver.StoreFileWriter.shouldEnableHistoricalCompactionFiles; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; @@ -143,6 +145,12 @@ public class CompactionConfiguration { conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, storeConfigInfo.getMemStoreFlushSize()); minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY_OLD, 3))); + if (shouldEnableHistoricalCompactionFiles(conf)) { + // If historical file writing is enabled, we bump up the min value by one as DualFileWriter + // compacts files into two files, live and historical, instead of one. 
This also eliminates + // infinite re-compaction when the min value is set to 2 + minFilesToCompact += 1; + } maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10); compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index d9ad265da64e..e58c53c355f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -464,7 +464,6 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel lastCleanCell = null; lastCleanCellSeqId = 0; } - writer.append(c); int len = c.getSerializedSize(); ++progress.currentCompactedKVs; progress.totalCompactedSize += len; @@ -478,6 +477,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel return false; } } + writer.appendAll(cells); if (shipper != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) { if (lastCleanCell != null) { // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index eb803c3e2a88..bcc84230952f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -31,8 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - /** * Compact passed set of files. 
Create an instance and then call * {@link #compact(CompactionRequestImpl, ThroughputController, User)} @@ -67,7 +65,7 @@ public List compact(final CompactionRequestImpl request, @Override protected List commitWriter(StoreFileWriter writer, FileDetails fd, CompactionRequestImpl request) throws IOException { - List newFiles = Lists.newArrayList(writer.getPath()); + List newFiles = writer.getPaths(); writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles()); writer.close(); return newFiles; @@ -75,17 +73,19 @@ protected List commitWriter(StoreFileWriter writer, FileDetails fd, @Override protected final void abortWriter(StoreFileWriter writer) throws IOException { - Path leftoverFile = writer.getPath(); + List leftoverFiles = writer.getPaths(); try { writer.close(); } catch (IOException e) { LOG.warn("Failed to close the writer after an unfinished compaction.", e); } try { - store.getFileSystem().delete(leftoverFile, false); + for (Path path : leftoverFiles) { + store.getFileSystem().delete(path, false); + } } catch (IOException e) { LOG.warn("Failed to delete the leftover file {} after an unfinished compaction.", - leftoverFile, e); + leftoverFiles, e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index f5be2b380382..9a00508cd00d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -66,7 +66,7 @@ public List preSelectFilesForCoprocessor(StripeInformationProvider s // We sincerely hope nobody is messing with us with their coprocessors. // If they do, they are very likely to shoot themselves in the foot. // We'll just exclude all the filesCompacting from the list. - ArrayList candidateFiles = new ArrayList<>(si.getStorefiles()); + ArrayList candidateFiles = new ArrayList<>(si.getStoreFiles()); candidateFiles.removeAll(filesCompacting); return candidateFiles; } @@ -114,7 +114,7 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, // This can happen due to region split. We can skip it later; for now preserve // compact-all-things behavior. - Collection allFiles = si.getStorefiles(); + Collection allFiles = si.getStoreFiles(); if (StoreUtils.hasReferences(allFiles)) { LOG.debug("There are references in the store; compacting all files"); long targetKvs = estimateTargetKvs(allFiles, config.getInitialCount()).getFirst(); @@ -165,7 +165,7 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, public boolean needsCompactions(StripeInformationProvider si, List filesCompacting) { // Approximation on whether we need compaction. - return filesCompacting.isEmpty() && (StoreUtils.hasReferences(si.getStorefiles()) + return filesCompacting.isEmpty() && (StoreUtils.hasReferences(si.getStoreFiles()) || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); } @@ -577,7 +577,7 @@ public void setMajorRangeFull() { /** The information about stripes that the policy needs to do its stuff */ public static interface StripeInformationProvider { - public Collection getStorefiles(); + public Collection getStoreFiles(); /** * Gets the start row for a given stripe. 
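A short usage sketch, under stated assumptions, of the configuration gate used throughout this patch: shouldEnableHistoricalCompactionFiles(conf) returns true only for the default store engine and default compactor, and the test added below sets the same key per table through TableDescriptorBuilder#setValue. The class name and the printed output are illustrative; the other symbols refer to code introduced in this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

public class HistoricalCompactionFilesConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // hbase.enable.historical.compaction.files, defined above as
    // StoreFileWriter.ENABLE_HISTORICAL_COMPACTION_FILES; it defaults to false.
    conf.setBoolean(StoreFileWriter.ENABLE_HISTORICAL_COMPACTION_FILES, true);
    // With the default DefaultStoreEngine/DefaultCompactor this prints true; a non-default
    // store engine or compactor makes the writer fall back to a single output file, which is
    // also why CompactionConfiguration bumps hbase.hstore.compaction.min by one when enabled.
    System.out.println(StoreFileWriter.shouldEnableHistoricalCompactionFiles(conf));
  }
}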
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index bdf3b92db65d..794a707062e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -185,7 +185,9 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th .withFileContext(hFileContext).withShouldDropCacheBehind(params.shouldDropBehind()) .withCompactedFilesSupplier(ctx.getCompactedFilesSupplier()) .withFileStoragePolicy(params.fileStoragePolicy()) - .withWriterCreationTracker(params.writerCreationTracker()); + .withWriterCreationTracker(params.writerCreationTracker()) + .withMaxVersions(ctx.getMaxVersions()).withNewVersionBehavior(ctx.getNewVersionBehavior()) + .withCellComparator(ctx.getComparator()).withIsCompaction(params.isCompaction()); return builder.build(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index 4754c5ba530b..320fc99f15b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -193,7 +193,7 @@ public boolean run(String[] args) throws IOException { int numMetaBlocks = ThreadLocalRandom.current().nextInt(10) + 1; LOG.info("Writing " + numMetaBlocks + " meta blocks"); for (int metaI = 0; metaI < numMetaBlocks; ++metaI) { - sfw.getHFileWriter().appendMetaBlock(generateString(), new BytesWritable(generateValue())); + sfw.getLiveFileWriter().appendMetaBlock(generateString(), new BytesWritable(generateValue())); } sfw.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java index cdabdf27491c..08bbed6e18ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java @@ -129,7 +129,7 @@ public MyCompactor(Configuration conf, HStore store) { @Override protected List commitWriter(StoreFileWriter writer, FileDetails fd, CompactionRequestImpl request) throws IOException { - HFileWriterImpl writerImpl = (HFileWriterImpl) writer.writer; + HFileWriterImpl writerImpl = (HFileWriterImpl) writer.getLiveFileWriter(); Cell cell = writerImpl.getLastCell(); // The cell should be backend with an KeyOnlyKeyValue. 
IS_LAST_CELL_ON_HEAP.set(cell instanceof KeyOnlyKeyValue); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index e888639eac4a..ccc755a03580 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -1764,7 +1764,7 @@ public void testAge() throws IOException { Arrays.asList(mockStoreFile(currentTime - 10), mockStoreFile(currentTime - 100), mockStoreFile(currentTime - 1000), mockStoreFile(currentTime - 10000)); StoreFileManager sfm = mock(StoreFileManager.class); - when(sfm.getStorefiles()).thenReturn(storefiles); + when(sfm.getStoreFiles()).thenReturn(storefiles); StoreEngine storeEngine = mock(StoreEngine.class); when(storeEngine.getStoreFileManager()).thenReturn(sfm); return storeEngine; @@ -1805,10 +1805,10 @@ private static class MyStore extends HStore { public List getScanners(List files, boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt, - boolean includeMemstoreScanner) throws IOException { + boolean includeMemstoreScanner, boolean onlyLatestVersion) throws IOException { hook.getScanners(this); return super.getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, - stopRow, false, readPt, includeMemstoreScanner); + stopRow, false, readPt, includeMemstoreScanner, onlyLatestVersion); } @Override @@ -1958,7 +1958,7 @@ public void testHFileContextSetWithCFAndTable() throws Exception { .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(10000L) .compression(Compression.Algorithm.NONE).isCompaction(true).includeMVCCReadpoint(true) .includesTag(false).shouldDropBehind(true)); - HFileContext hFileContext = writer.getHFileWriter().getFileContext(); + HFileContext hFileContext = writer.getLiveFileWriter().getFileContext(); assertArrayEquals(family, hFileContext.getColumnFamily()); assertArrayEquals(table, hFileContext.getTableName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java new file mode 100644 index 000000000000..6146605cd23e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.NEW_VERSION_BEHAVIOR; +import static org.apache.hadoop.hbase.regionserver.StoreFileWriter.ENABLE_HISTORICAL_COMPACTION_FILES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Store file writer does not do any compaction. Each cell written to either the live or historical + * file. Regular (i.e., not-raw) scans that reads the latest put cells scans only live files. To + * ensure the correctness of store file writer, we need to verify that live files includes all live + * cells. This test indirectly verify this as follows. The test creates two tables, each with one + * region and one store. The dual file writing (live vs historical) is configured on only one of the + * tables. The test generates exact set of mutations on both tables. These mutations include all + * types of cells and these cells are written to multiple files using multiple memstore flushes. + * After writing all cells, the test first verify that both tables return the same set of cells for + * regular and raw scans. Then the same verification is done after tables are minor and finally + * major compacted. The test also verifies that flushes do not generate historical files and the + * historical files are generated only when historical file generation is enabled (by the config + * hbase.enable.historical.compaction.files). + */ +@Category({ MediumTests.class, RegionServerTests.class }) +@RunWith(Parameterized.class) +public class TestStoreFileWriter { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestStoreFileWriter.class); + private final int ROW_NUM = 100; + private final Random RANDOM = new Random(11); + private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); + private HRegion[] regions = new HRegion[2]; + private final byte[][] qualifiers = + { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2") }; + // This keeps track of all cells. 
It is a list of rows, each row is a list of columns, each + // column is a list of CellInfo object + private ArrayList>> insertedCells; + private TableName[] tableName = new TableName[2]; + private final Configuration conf = testUtil.getConfiguration(); + private int flushCount = 0; + + @Parameterized.Parameter(0) + public KeepDeletedCells keepDeletedCells; + @Parameterized.Parameter(1) + public int maxVersions; + @Parameterized.Parameter(2) + public boolean newVersionBehavior; + + @Parameterized.Parameters(name = "keepDeletedCells={0}, maxVersions={1}, newVersionBehavior={2}") + public static synchronized Collection data() { + return Arrays.asList( + new Object[][] { { KeepDeletedCells.FALSE, 1, true }, { KeepDeletedCells.FALSE, 2, false }, + { KeepDeletedCells.FALSE, 3, true }, { KeepDeletedCells.TRUE, 1, false }, + // { KeepDeletedCells.TRUE, 2, true }, see HBASE-28442 + { KeepDeletedCells.TRUE, 3, false } }); + } + + // In memory representation of a cell. We only need to know timestamp and type field for our + // testing for cell. Please note the row for the cell is implicit in insertedCells. + private static class CellInfo { + long timestamp; + Cell.Type type; + + CellInfo(long timestamp, Cell.Type type) { + this.timestamp = timestamp; + this.type = type; + } + } + + private void createTable(int index, boolean enableDualFileWriter) throws IOException { + tableName[index] = TableName.valueOf(getClass().getSimpleName() + "_" + index); + ColumnFamilyDescriptor familyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(HBaseTestingUtil.fam1).setMaxVersions(maxVersions) + .setKeepDeletedCells(keepDeletedCells) + .setValue(NEW_VERSION_BEHAVIOR, Boolean.toString(newVersionBehavior)).build(); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(tableName[index]).setColumnFamily(familyDescriptor) + .setValue(ENABLE_HISTORICAL_COMPACTION_FILES, Boolean.toString(enableDualFileWriter)); + testUtil.createTable(builder.build(), null); + regions[index] = testUtil.getMiniHBaseCluster().getRegions(tableName[index]).get(0); + } + + @Before + public void setUp() throws Exception { + conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 6); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(MemoryCompactionPolicy.NONE)); + testUtil.startMiniCluster(); + createTable(0, false); + createTable(1, true); + insertedCells = new ArrayList<>(ROW_NUM); + for (int r = 0; r < ROW_NUM; r++) { + insertedCells.add(new ArrayList<>(qualifiers.length)); + for (int q = 0; q < qualifiers.length; q++) { + insertedCells.get(r).add(new ArrayList<>(10)); + } + } + } + + @After + public void tearDown() throws Exception { + this.testUtil.shutdownMiniCluster(); + testUtil.cleanupTestDir(); + } + + @Test + public void testCompactedFiles() throws Exception { + for (int i = 0; i < 10; i++) { + insertRows(ROW_NUM * maxVersions); + deleteRows(ROW_NUM / 8); + deleteRowVersions(ROW_NUM / 8); + deleteColumns(ROW_NUM / 8); + deleteColumnVersions(ROW_NUM / 8); + flushRegion(); + } + + verifyCells(); + + HStore[] stores = new HStore[2]; + + stores[0] = regions[0].getStore(HBaseTestingUtil.fam1); + assertEquals(flushCount, stores[0].getStorefilesCount()); + + stores[1] = regions[1].getStore(HBaseTestingUtil.fam1); + assertEquals(flushCount, stores[1].getStorefilesCount()); + + regions[0].compact(false); + assertEquals(flushCount - stores[0].getCompactedFiles().size() + 1, + stores[0].getStorefilesCount()); + + regions[1].compact(false); + assertEquals(flushCount - 
stores[1].getCompactedFiles().size() + 2, + stores[1].getStorefilesCount()); + + verifyCells(); + + regions[0].compact(true); + assertEquals(1, stores[0].getStorefilesCount()); + + regions[1].compact(true); + assertEquals(keepDeletedCells == KeepDeletedCells.FALSE ? 1 : 2, + stores[1].getStorefilesCount()); + + verifyCells(); + } + + private void verifyCells() throws Exception { + scanAndCompare(false); + scanAndCompare(true); + } + + private void flushRegion() throws Exception { + regions[0].flush(true); + regions[1].flush(true); + flushCount++; + } + + private Long getRowTimestamp(int row) { + Long maxTimestamp = null; + for (int q = 0; q < qualifiers.length; q++) { + int size = insertedCells.get(row).get(q).size(); + if (size > 0) { + CellInfo mostRecentCellInfo = insertedCells.get(row).get(q).get(size - 1); + if (mostRecentCellInfo.type == Cell.Type.Put) { + if (maxTimestamp == null || maxTimestamp < mostRecentCellInfo.timestamp) { + maxTimestamp = mostRecentCellInfo.timestamp; + } + } + } + } + return maxTimestamp; + } + + private long getNewTimestamp(long timestamp) throws Exception { + long newTimestamp = System.currentTimeMillis(); + if (timestamp == newTimestamp) { + Thread.sleep(1); + newTimestamp = System.currentTimeMillis(); + assertTrue(timestamp < newTimestamp); + } + return newTimestamp; + } + + private void insertRows(int rowCount) throws Exception { + int row; + long timestamp = System.currentTimeMillis(); + for (int r = 0; r < rowCount; r++) { + row = RANDOM.nextInt(ROW_NUM); + Put put = new Put(Bytes.toBytes(String.valueOf(row)), timestamp); + for (int q = 0; q < qualifiers.length; q++) { + put.addColumn(HBaseTestingUtil.fam1, qualifiers[q], + Bytes.toBytes(String.valueOf(timestamp))); + insertedCells.get(row).get(q).add(new CellInfo(timestamp, Cell.Type.Put)); + } + regions[0].put(put); + regions[1].put(put); + timestamp = getNewTimestamp(timestamp); + } + } + + private void deleteRows(int rowCount) throws Exception { + int row; + for (int r = 0; r < rowCount; r++) { + long timestamp = System.currentTimeMillis(); + row = RANDOM.nextInt(ROW_NUM); + Delete delete = new Delete(Bytes.toBytes(String.valueOf(row))); + regions[0].delete(delete); + regions[1].delete(delete); + // For simplicity, the family delete markers are inserted for all columns (instead of + // allocating a separate column for them) in the memory representation of the data stored + // to HBase + for (int q = 0; q < qualifiers.length; q++) { + insertedCells.get(row).get(q).add(new CellInfo(timestamp, Cell.Type.DeleteFamily)); + } + } + } + + private void deleteSingleRowVersion(int row, long timestamp) throws IOException { + Delete delete = new Delete(Bytes.toBytes(String.valueOf(row))); + delete.addFamilyVersion(HBaseTestingUtil.fam1, timestamp); + regions[0].delete(delete); + regions[1].delete(delete); + // For simplicity, the family delete version markers are inserted for all columns (instead of + // allocating a separate column for them) in the memory representation of the data stored + // to HBase + for (int q = 0; q < qualifiers.length; q++) { + insertedCells.get(row).get(q).add(new CellInfo(timestamp, Cell.Type.DeleteFamilyVersion)); + } + } + + private void deleteRowVersions(int rowCount) throws Exception { + int row; + for (int r = 0; r < rowCount; r++) { + row = RANDOM.nextInt(ROW_NUM); + Long timestamp = getRowTimestamp(row); + if (timestamp != null) { + deleteSingleRowVersion(row, timestamp); + } + } + // Just insert one more delete marker possibly does not delete any row version + row = 
RANDOM.nextInt(ROW_NUM); + deleteSingleRowVersion(row, System.currentTimeMillis()); + } + + private void deleteColumns(int rowCount) throws Exception { + int row; + for (int r = 0; r < rowCount; r++) { + long timestamp = System.currentTimeMillis(); + row = RANDOM.nextInt(ROW_NUM); + int q = RANDOM.nextInt(qualifiers.length); + Delete delete = new Delete(Bytes.toBytes(String.valueOf(row)), timestamp); + delete.addColumns(HBaseTestingUtil.fam1, qualifiers[q], timestamp); + regions[0].delete(delete); + regions[1].delete(delete); + insertedCells.get(row).get(q).add(new CellInfo(timestamp, Cell.Type.DeleteColumn)); + } + } + + private void deleteColumnVersions(int rowCount) throws Exception { + int row; + for (int r = 0; r < rowCount; r++) { + row = RANDOM.nextInt(ROW_NUM); + Long timestamp = getRowTimestamp(row); + if (timestamp != null) { + Delete delete = new Delete(Bytes.toBytes(String.valueOf(row))); + int q = RANDOM.nextInt(qualifiers.length); + delete.addColumn(HBaseTestingUtil.fam1, qualifiers[q], timestamp); + regions[0].delete(delete); + regions[1].delete(delete); + insertedCells.get(row).get(q).add(new CellInfo(timestamp, Cell.Type.Delete)); + } + } + } + + private Scan createScan(boolean raw) { + Scan scan = new Scan(); + scan.readAllVersions(); + scan.setRaw(raw); + return scan; + } + + private void scanAndCompare(boolean raw) throws Exception { + try (RegionScanner firstRS = regions[0].getScanner(createScan(raw))) { + try (RegionScanner secondRS = regions[1].getScanner(createScan(raw))) { + boolean firstHasMore; + boolean secondHasMore; + do { + List firstRowList = new ArrayList<>(); + List secondRowList = new ArrayList<>(); + firstHasMore = firstRS.nextRaw(firstRowList); + secondHasMore = secondRS.nextRaw(secondRowList); + assertEquals(firstRowList.size(), secondRowList.size()); + int size = firstRowList.size(); + for (int i = 0; i < size; i++) { + Cell firstCell = firstRowList.get(i); + Cell secondCell = secondRowList.get(i); + assertTrue(CellUtil.matchingRowColumn(firstCell, secondCell)); + assertTrue(firstCell.getType() == secondCell.getType()); + assertTrue( + Bytes.equals(CellUtil.cloneValue(firstCell), CellUtil.cloneValue(firstCell))); + } + } while (firstHasMore && secondHasMore); + assertEquals(firstHasMore, secondHasMore); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index b61be8de00cc..ec5401a08b99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -95,15 +95,15 @@ public void testInsertFilesIntoL0() throws Exception { MockHStoreFile sf = createFile(); manager.insertNewFiles(al(sf)); assertEquals(1, manager.getStorefileCount()); - Collection filesForGet = manager.getFilesForScan(KEY_A, true, KEY_A, true); + Collection filesForGet = manager.getFilesForScan(KEY_A, true, KEY_A, true, false); assertEquals(1, filesForGet.size()); assertTrue(filesForGet.contains(sf)); // Add some stripes and make sure we get this file for every stripe. 
manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY))); - assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true).contains(sf)); - assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true).contains(sf)); + assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true, false).contains(sf)); + assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true, false).contains(sf)); } @Test @@ -117,7 +117,7 @@ public void testClearFiles() throws Exception { Collection allFiles = manager.clearFiles(); assertEquals(4, allFiles.size()); assertEquals(0, manager.getStorefileCount()); - assertEquals(0, manager.getStorefiles().size()); + assertEquals(0, manager.getStoreFiles().size()); } private static ArrayList dumpIterator(Iterator iter) { @@ -541,7 +541,7 @@ private void testPriorityScenario(int expectedPriority, int limit, int stripes, private void verifyInvalidCompactionScenario(StripeStoreFileManager manager, ArrayList filesToCompact, ArrayList filesToInsert) throws Exception { - Collection allFiles = manager.getStorefiles(); + Collection allFiles = manager.getStoreFiles(); assertThrows(IllegalStateException.class, () -> manager.addCompactionResults(filesToCompact, filesToInsert)); verifyAllFiles(manager, allFiles); // must have the same files. @@ -556,7 +556,7 @@ private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] star Collection results) throws Exception { start = start != null ? start : HConstants.EMPTY_START_ROW; end = end != null ? end : HConstants.EMPTY_END_ROW; - Collection sfs = manager.getFilesForScan(start, true, end, false); + Collection sfs = manager.getFilesForScan(start, true, end, false, false); assertEquals(results.size(), sfs.size()); for (HStoreFile result : results) { assertTrue(sfs.contains(result)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index c4f98f4d94ad..295d0cc4c2fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -245,13 +245,13 @@ public void testWithReferences() throws Exception { when(ref.isReference()).thenReturn(true); StripeInformationProvider si = mock(StripeInformationProvider.class); Collection sfs = al(ref, createFile()); - when(si.getStorefiles()).thenReturn(sfs); + when(si.getStoreFiles()).thenReturn(sfs); assertTrue(policy.needsCompactions(si, al())); StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false); // UnmodifiableCollection does not implement equals so we need to change it here to a // collection that implements it. 
- assertEquals(si.getStorefiles(), new ArrayList<>(scr.getRequest().getFiles())); + assertEquals(si.getStoreFiles(), new ArrayList<>(scr.getRequest().getFiles())); scr.execute(sc, NoLimitThroughputController.INSTANCE, null); verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), any(), any()); @@ -264,11 +264,11 @@ public void testInitialCountFromL0() throws Exception { StripeCompactionPolicy policy = createPolicy(conf, defaultSplitSize, defaultSplitCount, 2, false); StripeCompactionPolicy.StripeInformationProvider si = createStripesL0Only(3, 8); - verifyCompaction(policy, si, si.getStorefiles(), true, 2, 12L, OPEN_KEY, OPEN_KEY, true); + verifyCompaction(policy, si, si.getStoreFiles(), true, 2, 12L, OPEN_KEY, OPEN_KEY, true); si = createStripesL0Only(3, 10); // If result would be too large, split into smaller parts. - verifyCompaction(policy, si, si.getStorefiles(), true, 3, 10L, OPEN_KEY, OPEN_KEY, true); + verifyCompaction(policy, si, si.getStoreFiles(), true, 3, 10L, OPEN_KEY, OPEN_KEY, true); policy = createPolicy(conf, defaultSplitSize, defaultSplitCount, 6, false); - verifyCompaction(policy, si, si.getStorefiles(), true, 6, 5L, OPEN_KEY, OPEN_KEY, true); + verifyCompaction(policy, si, si.getStoreFiles(), true, 6, 5L, OPEN_KEY, OPEN_KEY, true); } @Test @@ -857,7 +857,7 @@ private static StripeInformationProvider createStripesWithFiles(List bou ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addAllSublists(stripes); sfs.addSublist(l0Files); - when(si.getStorefiles()).thenReturn(sfs); + when(si.getStoreFiles()).thenReturn(sfs); when(si.getStripes()).thenReturn(stripes); when(si.getStripeBoundaries()).thenReturn(boundariesList); when(si.getStripeCount()).thenReturn(stripes.size()); From 3a3dd66e21da3f85c72d75605857713716d579fb Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Fri, 17 May 2024 12:48:10 +0200 Subject: [PATCH 364/514] HBASE-28568 Incremental backup set does not correctly shrink (#5876) The incremental backup set is the set of tables included when an incremental backup is created, it is managed per backup root dir and contains all tables that are present in at least one backup (in that root dir). The incremental backup set can only shrink when backups are deleted. However, the implementation was incorrect, causing this set to never be able to shrink. 
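The shrink behaviour this patch restores can be illustrated in isolation: after a delete, the incremental backup set for a backup root should keep only the tables that are still referenced by at least one remaining backup under that root. The sketch below is a minimal, self-contained illustration of that idea, not the HBase backup API; the BackupRecord type and the sample table names are hypothetical stand-ins, and the retainAll() step simply mirrors the finalizeDelete() change in the diff that follows.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class IncrementalBackupSetShrinkSketch {

  // Hypothetical stand-in for a stored backup: its id and the tables it covers.
  record BackupRecord(String backupId, Set<String> tables) {}

  // Keep only the tables that are still present in at least one remaining backup.
  static Set<String> shrink(Set<String> incrementalSet, List<BackupRecord> remainingBackups) {
    Set<String> stillReferenced = new HashSet<>();
    for (BackupRecord backup : remainingBackups) {
      stillReferenced.addAll(backup.tables());
    }
    Set<String> revised = new HashSet<>(incrementalSet);
    revised.retainAll(stillReferenced);
    return revised;
  }

  public static void main(String[] args) {
    Set<String> incrementalSet = Set.of("t1", "t2", "t3");
    List<BackupRecord> remaining = List.of(new BackupRecord("backup_2", Set.of("t3")));
    // After deleting the backup that covered t1 and t2, only t3 remains in the set.
    System.out.println(shrink(incrementalSet, remaining)); // prints [t3]
  }
}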
Reviewed-by: Ray Mattingly Signed-off-by: Nick Dimiduk --- .../hbase/backup/impl/BackupAdminImpl.java | 40 ++++++++----------- .../hbase/backup/impl/BackupSystemTable.java | 29 ++++++++------ .../hadoop/hbase/backup/TestBackupDelete.java | 25 ++++++++++++ 3 files changed, 57 insertions(+), 37 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 69aef51a4ed3..de8ca6b7497c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -94,7 +94,6 @@ public BackupInfo getBackupInfo(String backupId) throws IOException { public int deleteBackups(String[] backupIds) throws IOException { int totalDeleted = 0; - Map> allTablesMap = new HashMap<>(); boolean deleteSessionStarted; boolean snapshotDone; @@ -130,20 +129,16 @@ public int deleteBackups(String[] backupIds) throws IOException { } snapshotDone = true; try { + List affectedBackupRootDirs = new ArrayList<>(); for (int i = 0; i < backupIds.length; i++) { BackupInfo info = sysTable.readBackupInfo(backupIds[i]); - if (info != null) { - String rootDir = info.getBackupRootDir(); - HashSet allTables = allTablesMap.get(rootDir); - if (allTables == null) { - allTables = new HashSet<>(); - allTablesMap.put(rootDir, allTables); - } - allTables.addAll(info.getTableNames()); - totalDeleted += deleteBackup(backupIds[i], sysTable); + if (info == null) { + continue; } + affectedBackupRootDirs.add(info.getBackupRootDir()); + totalDeleted += deleteBackup(backupIds[i], sysTable); } - finalizeDelete(allTablesMap, sysTable); + finalizeDelete(affectedBackupRootDirs, sysTable); // Finish sysTable.finishDeleteOperation(); // delete snapshot @@ -176,26 +171,23 @@ public int deleteBackups(String[] backupIds) throws IOException { /** * Updates incremental backup set for every backupRoot - * @param tablesMap map [backupRoot: {@code Set}] - * @param table backup system table + * @param backupRoots backupRoots for which to revise the incremental backup set + * @param table backup system table * @throws IOException if a table operation fails */ - private void finalizeDelete(Map> tablesMap, BackupSystemTable table) + private void finalizeDelete(List backupRoots, BackupSystemTable table) throws IOException { - for (String backupRoot : tablesMap.keySet()) { + for (String backupRoot : backupRoots) { Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); - Map> tableMap = + Map> tableMap = table.getBackupHistoryForTableSet(incrTableSet, backupRoot); - for (Map.Entry> entry : tableMap.entrySet()) { - if (entry.getValue() == null) { - // No more backups for a table - incrTableSet.remove(entry.getKey()); - } - } + + // Keep only the tables that are present in other backups + incrTableSet.retainAll(tableMap.keySet()); + + table.deleteIncrementalBackupTableSet(backupRoot); if (!incrTableSet.isEmpty()) { table.addIncrementalBackupTableSet(incrTableSet, backupRoot); - } else { // empty - table.deleteIncrementalBackupTableSet(backupRoot); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 682757dbc404..c364316d54eb 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -86,7 +86,8 @@
  * <ul>
  * <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo
  * <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode
- * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]
+ * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; table="meta:"+tablename of
+ * include table; value=empty
  * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
  * timestamp]
  * <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp
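To make the schema comment above concrete: the row keys are plain prefixes concatenated with a backup id, a backup root, or a table name, and the incremental backup set row carries one "meta:"+tablename qualifier per included table. The fragment below only illustrates how such keys could be composed under the documented conventions; it is not the BackupSystemTable API, and the sample values are made up.

import java.nio.charset.StandardCharsets;

public class BackupRowKeySketch {

  // Row key for a backup session, per item 1 of the schema comment.
  static byte[] sessionRowKey(String backupId) {
    return ("session:" + backupId).getBytes(StandardCharsets.UTF_8);
  }

  // Row key for the start code of a backup root, per item 2.
  static byte[] startCodeRowKey(String backupRoot) {
    return ("startcode:" + backupRoot).getBytes(StandardCharsets.UTF_8);
  }

  // Row key for the incremental backup set of a backup root, per item 3;
  // each included table becomes a "meta:"+tablename qualifier with an empty value.
  static byte[] incrementalBackupSetRowKey(String backupRoot) {
    return ("incrbackupset:" + backupRoot).getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    // Hypothetical sample values.
    System.out.println(new String(sessionRowKey("backup_1715950000000"), StandardCharsets.UTF_8));
    System.out.println(new String(startCodeRowKey("hdfs://ns1/backups"), StandardCharsets.UTF_8));
    System.out.println(new String(incrementalBackupSetRowKey("hdfs://ns1/backups"), StandardCharsets.UTF_8));
  }
}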
  • @@ -839,23 +840,25 @@ public List getBackupHistoryForTable(TableName name) throws IOExcept return tableHistory; } - public Map> getBackupHistoryForTableSet(Set set, + /** + * Goes through all backup history corresponding to the provided root folder, and collects all + * backup info mentioning each of the provided tables. + * @param set the tables for which to collect the {@code BackupInfo} + * @param backupRoot backup destination path to retrieve backup history for + * @return a map containing (a subset of) the provided {@code TableName}s, mapped to a list of at + * least one {@code BackupInfo} + * @throws IOException if getting the backup history fails + */ + public Map> getBackupHistoryForTableSet(Set set, String backupRoot) throws IOException { List history = getBackupHistory(backupRoot); - Map> tableHistoryMap = new HashMap<>(); - for (Iterator iterator = history.iterator(); iterator.hasNext();) { - BackupInfo info = iterator.next(); - if (!backupRoot.equals(info.getBackupRootDir())) { - continue; - } + Map> tableHistoryMap = new HashMap<>(); + for (BackupInfo info : history) { List tables = info.getTableNames(); for (TableName tableName : tables) { if (set.contains(tableName)) { - ArrayList list = tableHistoryMap.get(tableName); - if (list == null) { - list = new ArrayList<>(); - tableHistoryMap.put(tableName, list); - } + List list = + tableHistoryMap.computeIfAbsent(tableName, k -> new ArrayList<>()); list.add(info); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java index 0c4d44d489d8..ef40bc63d086 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; import org.junit.ClassRule; @@ -158,4 +160,27 @@ public long currentTime() { LOG.info(baos.toString()); assertTrue(output.indexOf("Deleted 1 backups") >= 0); } + + /** + * Verify that backup deletion updates the incremental-backup-set. 
+ */ + @Test + public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception { + LOG.info("Test backup delete updates the incremental backup set"); + BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection()); + + String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2)); + assertTrue(checkSucceeded(backupId1)); + assertEquals(Sets.newHashSet(table1, table2), + backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); + + String backupId2 = fullTableBackup(Lists.newArrayList(table3)); + assertTrue(checkSucceeded(backupId2)); + assertEquals(Sets.newHashSet(table1, table2, table3), + backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); + + getBackupAdmin().deleteBackups(new String[] { backupId1 }); + assertEquals(Sets.newHashSet(table3), + backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); + } } From 0db26eccdfce271b4a9e9c9137bb9bae533f5cf8 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 May 2024 22:05:44 +0800 Subject: [PATCH 365/514] HBASE-28572 Remove deprecated methods in thrift module (#5882) Signed-off-by: Xin Sun --- .../thrift2/gen-py/hbase/THBaseService-remote | 44 +- .../thrift2/gen-py/hbase/THBaseService.py | 1530 +++++-- .../python/thrift2/gen-py/hbase/constants.py | 2 +- .../python/thrift2/gen-py/hbase/ttypes.py | 656 ++- .../hbase/thrift/generated/AlreadyExists.java | 2 +- .../hbase/thrift/generated/BatchMutation.java | 2 +- .../thrift/generated/ColumnDescriptor.java | 2 +- .../hadoop/hbase/thrift/generated/Hbase.java | 2 +- .../hbase/thrift/generated/IOError.java | 2 +- .../thrift/generated/IllegalArgument.java | 2 +- .../hbase/thrift/generated/Mutation.java | 2 +- .../generated/TAccessControlEntity.java | 2 +- .../hbase/thrift/generated/TAppend.java | 2 +- .../hadoop/hbase/thrift/generated/TCell.java | 2 +- .../hbase/thrift/generated/TColumn.java | 2 +- .../hbase/thrift/generated/TIncrement.java | 2 +- .../thrift/generated/TPermissionScope.java | 2 +- .../hbase/thrift/generated/TRegionInfo.java | 2 +- .../hbase/thrift/generated/TRowResult.java | 2 +- .../hadoop/hbase/thrift/generated/TScan.java | 2 +- .../thrift/generated/TThriftServerType.java | 2 +- .../thrift2/ThriftHBaseServiceHandler.java | 7 - .../generated/TAccessControlEntity.java | 2 +- .../hbase/thrift2/generated/TAppend.java | 2 +- .../thrift2/generated/TAuthorization.java | 2 +- .../thrift2/generated/TBloomFilterType.java | 2 +- .../thrift2/generated/TCellVisibility.java | 2 +- .../hbase/thrift2/generated/TColumn.java | 2 +- .../generated/TColumnFamilyDescriptor.java | 2 +- .../thrift2/generated/TColumnIncrement.java | 2 +- .../hbase/thrift2/generated/TColumnValue.java | 2 +- .../thrift2/generated/TCompareOperator.java | 2 +- .../generated/TCompressionAlgorithm.java | 2 +- .../hbase/thrift2/generated/TConsistency.java | 2 +- .../thrift2/generated/TDataBlockEncoding.java | 2 +- .../hbase/thrift2/generated/TDelete.java | 2 +- .../hbase/thrift2/generated/TDeleteType.java | 2 +- .../hbase/thrift2/generated/TDurability.java | 2 +- .../thrift2/generated/TFilterByOperator.java | 2 +- .../hadoop/hbase/thrift2/generated/TGet.java | 2 +- .../thrift2/generated/THBaseService.java | 3872 ++++++----------- .../hbase/thrift2/generated/THRegionInfo.java | 2 +- .../thrift2/generated/THRegionLocation.java | 2 +- .../hbase/thrift2/generated/TIOError.java | 2 +- .../thrift2/generated/TIllegalArgument.java | 2 +- .../hbase/thrift2/generated/TIncrement.java | 2 +- .../thrift2/generated/TKeepDeletedCells.java | 2 +- 
.../thrift2/generated/TLogQueryFilter.java | 2 +- .../hbase/thrift2/generated/TLogType.java | 2 +- .../hbase/thrift2/generated/TMutation.java | 2 +- .../generated/TNamespaceDescriptor.java | 2 +- .../thrift2/generated/TOnlineLogRecord.java | 2 +- .../thrift2/generated/TPermissionScope.java | 2 +- .../hadoop/hbase/thrift2/generated/TPut.java | 2 +- .../hbase/thrift2/generated/TReadType.java | 2 +- .../hbase/thrift2/generated/TResult.java | 2 +- .../thrift2/generated/TRowMutations.java | 2 +- .../hadoop/hbase/thrift2/generated/TScan.java | 2 +- .../hbase/thrift2/generated/TServerName.java | 2 +- .../thrift2/generated/TTableDescriptor.java | 2 +- .../hbase/thrift2/generated/TTableName.java | 2 +- .../thrift2/generated/TThriftServerType.java | 2 +- .../hbase/thrift2/generated/TTimeRange.java | 2 +- .../apache/hadoop/hbase/thrift2/hbase.thrift | 18 - 64 files changed, 3221 insertions(+), 3022 deletions(-) diff --git a/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService-remote b/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService-remote index 0264483dfe5d..1ab7508c5220 100755 --- a/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService-remote +++ b/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService-remote @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Autogenerated by Thrift Compiler (0.12.0) +# Autogenerated by Thrift Compiler (0.14.1) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # @@ -59,7 +59,6 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool isTableEnabled(TTableName tableName)') print(' bool isTableDisabled(TTableName tableName)') print(' bool isTableAvailable(TTableName tableName)') - print(' bool isTableAvailableWithSplit(TTableName tableName, splitKeys)') print(' void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)') print(' void deleteColumnFamily(TTableName tableName, string column)') print(' void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)') @@ -71,6 +70,11 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' listNamespaceDescriptors()') print(' listNamespaces()') print(' TThriftServerType getThriftServerType()') + print(' string getClusterId()') + print(' getSlowLogResponses( serverNames, TLogQueryFilter logQueryFilter)') + print(' clearSlowLogResponses( serverNames)') + print(' bool grant(TAccessControlEntity info)') + print(' bool revoke(TAccessControlEntity info)') print('') sys.exit(0) @@ -360,12 +364,6 @@ elif cmd == 'isTableAvailable': sys.exit(1) pp.pprint(client.isTableAvailable(eval(args[0]),)) -elif cmd == 'isTableAvailableWithSplit': - if len(args) != 2: - print('isTableAvailableWithSplit requires 2 args') - sys.exit(1) - pp.pprint(client.isTableAvailableWithSplit(eval(args[0]), eval(args[1]),)) - elif cmd == 'addColumnFamily': if len(args) != 2: print('addColumnFamily requires 2 args') @@ -432,6 +430,36 @@ elif cmd == 'getThriftServerType': sys.exit(1) pp.pprint(client.getThriftServerType()) +elif cmd == 'getClusterId': + if len(args) != 0: + print('getClusterId requires 0 args') + sys.exit(1) + pp.pprint(client.getClusterId()) + +elif cmd == 'getSlowLogResponses': + if len(args) != 2: + print('getSlowLogResponses requires 2 args') + sys.exit(1) + pp.pprint(client.getSlowLogResponses(eval(args[0]), eval(args[1]),)) + +elif cmd == 'clearSlowLogResponses': + if len(args) != 1: + print('clearSlowLogResponses requires 1 args') + sys.exit(1) + pp.pprint(client.clearSlowLogResponses(eval(args[0]),)) + +elif cmd == 'grant': + if 
len(args) != 1: + print('grant requires 1 args') + sys.exit(1) + pp.pprint(client.grant(eval(args[0]),)) + +elif cmd == 'revoke': + if len(args) != 1: + print('revoke requires 1 args') + sys.exit(1) + pp.pprint(client.revoke(eval(args[0]),)) + else: print('Unrecognized method %s' % cmd) sys.exit(1) diff --git a/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService.py b/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService.py index 202ed3b92126..133fafc40911 100644 --- a/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService.py +++ b/hbase-examples/src/main/python/thrift2/gen-py/hbase/THBaseService.py @@ -1,5 +1,5 @@ # -# Autogenerated by Thrift Compiler (0.12.0) +# Autogenerated by Thrift Compiler (0.14.1) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # @@ -482,26 +482,6 @@ def isTableAvailable(self, tableName): """ pass - def isTableAvailableWithSplit(self, tableName, splitKeys): - """ - * Use this api to check if the table has been created with the specified number of splitkeys - * which was used while creating the given table. Note : If this api is used after a table's - * region gets splitted, the api may return false. - * - * @return true if table is available, false if not - * - * @deprecated Since 2.2.0. Because the same method in Table interface has been deprecated - * since 2.0.0, we will remove it in 3.0.0 release. - * Use {@link #isTableAvailable(TTableName tableName)} instead - * - - Parameters: - - tableName: the tablename to check - - splitKeys: keys to check if the table has been created with all split keys - - """ - pass - def addColumnFamily(self, tableName, column): """ Add a column family to an existing table. Synchronous operation. @@ -623,6 +603,63 @@ def getThriftServerType(self): """ pass + def getClusterId(self): + """ + Returns the cluster ID for this cluster. + + """ + pass + + def getSlowLogResponses(self, serverNames, logQueryFilter): + """ + Retrieves online slow RPC logs from the provided list of + RegionServers + + @return online slowlog response list + @throws TIOError if a remote or network exception occurs + + Parameters: + - serverNames: @param serverNames Server names to get slowlog responses from + - logQueryFilter: @param logQueryFilter filter to be used if provided + + """ + pass + + def clearSlowLogResponses(self, serverNames): + """ + Clears online slow/large RPC logs from the provided list of + RegionServers + + @return List of booleans representing if online slowlog response buffer is cleaned + from each RegionServer + @throws TIOError if a remote or network exception occurs + + Parameters: + - serverNames: @param serverNames Set of Server names to clean slowlog responses from + + """ + pass + + def grant(self, info): + """ + Grant permissions in table or namespace level. + + Parameters: + - info + + """ + pass + + def revoke(self, info): + """ + Revoke permissions in table or namespace level. + + Parameters: + - info + + """ + pass + class Client(Iface): def __init__(self, iprot, oprot=None): @@ -2025,53 +2062,6 @@ def recv_isTableAvailable(self): raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableAvailable failed: unknown result") - def isTableAvailableWithSplit(self, tableName, splitKeys): - """ - * Use this api to check if the table has been created with the specified number of splitkeys - * which was used while creating the given table. Note : If this api is used after a table's - * region gets splitted, the api may return false. 
- * - * @return true if table is available, false if not - * - * @deprecated Since 2.2.0. Because the same method in Table interface has been deprecated - * since 2.0.0, we will remove it in 3.0.0 release. - * Use {@link #isTableAvailable(TTableName tableName)} instead - * - - Parameters: - - tableName: the tablename to check - - splitKeys: keys to check if the table has been created with all split keys - - """ - self.send_isTableAvailableWithSplit(tableName, splitKeys) - return self.recv_isTableAvailableWithSplit() - - def send_isTableAvailableWithSplit(self, tableName, splitKeys): - self._oprot.writeMessageBegin('isTableAvailableWithSplit', TMessageType.CALL, self._seqid) - args = isTableAvailableWithSplit_args() - args.tableName = tableName - args.splitKeys = splitKeys - args.write(self._oprot) - self._oprot.writeMessageEnd() - self._oprot.trans.flush() - - def recv_isTableAvailableWithSplit(self): - iprot = self._iprot - (fname, mtype, rseqid) = iprot.readMessageBegin() - if mtype == TMessageType.EXCEPTION: - x = TApplicationException() - x.read(iprot) - iprot.readMessageEnd() - raise x - result = isTableAvailableWithSplit_result() - result.read(iprot) - iprot.readMessageEnd() - if result.success is not None: - return result.success - if result.io is not None: - raise result.io - raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableAvailableWithSplit failed: unknown result") - def addColumnFamily(self, tableName, column): """ Add a column family to an existing table. Synchronous operation. @@ -2463,6 +2453,191 @@ def recv_getThriftServerType(self): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "getThriftServerType failed: unknown result") + def getClusterId(self): + """ + Returns the cluster ID for this cluster. 
+ + """ + self.send_getClusterId() + return self.recv_getClusterId() + + def send_getClusterId(self): + self._oprot.writeMessageBegin('getClusterId', TMessageType.CALL, self._seqid) + args = getClusterId_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getClusterId(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getClusterId_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getClusterId failed: unknown result") + + def getSlowLogResponses(self, serverNames, logQueryFilter): + """ + Retrieves online slow RPC logs from the provided list of + RegionServers + + @return online slowlog response list + @throws TIOError if a remote or network exception occurs + + Parameters: + - serverNames: @param serverNames Server names to get slowlog responses from + - logQueryFilter: @param logQueryFilter filter to be used if provided + + """ + self.send_getSlowLogResponses(serverNames, logQueryFilter) + return self.recv_getSlowLogResponses() + + def send_getSlowLogResponses(self, serverNames, logQueryFilter): + self._oprot.writeMessageBegin('getSlowLogResponses', TMessageType.CALL, self._seqid) + args = getSlowLogResponses_args() + args.serverNames = serverNames + args.logQueryFilter = logQueryFilter + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getSlowLogResponses(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getSlowLogResponses_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.io is not None: + raise result.io + raise TApplicationException(TApplicationException.MISSING_RESULT, "getSlowLogResponses failed: unknown result") + + def clearSlowLogResponses(self, serverNames): + """ + Clears online slow/large RPC logs from the provided list of + RegionServers + + @return List of booleans representing if online slowlog response buffer is cleaned + from each RegionServer + @throws TIOError if a remote or network exception occurs + + Parameters: + - serverNames: @param serverNames Set of Server names to clean slowlog responses from + + """ + self.send_clearSlowLogResponses(serverNames) + return self.recv_clearSlowLogResponses() + + def send_clearSlowLogResponses(self, serverNames): + self._oprot.writeMessageBegin('clearSlowLogResponses', TMessageType.CALL, self._seqid) + args = clearSlowLogResponses_args() + args.serverNames = serverNames + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_clearSlowLogResponses(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = clearSlowLogResponses_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.io is not None: + raise result.io + raise TApplicationException(TApplicationException.MISSING_RESULT, "clearSlowLogResponses failed: unknown result") + + def grant(self, info): + 
""" + Grant permissions in table or namespace level. + + Parameters: + - info + + """ + self.send_grant(info) + return self.recv_grant() + + def send_grant(self, info): + self._oprot.writeMessageBegin('grant', TMessageType.CALL, self._seqid) + args = grant_args() + args.info = info + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_grant(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = grant_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.io is not None: + raise result.io + raise TApplicationException(TApplicationException.MISSING_RESULT, "grant failed: unknown result") + + def revoke(self, info): + """ + Revoke permissions in table or namespace level. + + Parameters: + - info + + """ + self.send_revoke(info) + return self.recv_revoke() + + def send_revoke(self, info): + self._oprot.writeMessageBegin('revoke', TMessageType.CALL, self._seqid) + args = revoke_args() + args.info = info + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_revoke(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = revoke_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.io is not None: + raise result.io + raise TApplicationException(TApplicationException.MISSING_RESULT, "revoke failed: unknown result") + class Processor(Iface, TProcessor): def __init__(self, handler): @@ -2503,7 +2678,6 @@ def __init__(self, handler): self._processMap["isTableEnabled"] = Processor.process_isTableEnabled self._processMap["isTableDisabled"] = Processor.process_isTableDisabled self._processMap["isTableAvailable"] = Processor.process_isTableAvailable - self._processMap["isTableAvailableWithSplit"] = Processor.process_isTableAvailableWithSplit self._processMap["addColumnFamily"] = Processor.process_addColumnFamily self._processMap["deleteColumnFamily"] = Processor.process_deleteColumnFamily self._processMap["modifyColumnFamily"] = Processor.process_modifyColumnFamily @@ -2515,9 +2689,20 @@ def __init__(self, handler): self._processMap["listNamespaceDescriptors"] = Processor.process_listNamespaceDescriptors self._processMap["listNamespaces"] = Processor.process_listNamespaces self._processMap["getThriftServerType"] = Processor.process_getThriftServerType + self._processMap["getClusterId"] = Processor.process_getClusterId + self._processMap["getSlowLogResponses"] = Processor.process_getSlowLogResponses + self._processMap["clearSlowLogResponses"] = Processor.process_clearSlowLogResponses + self._processMap["grant"] = Processor.process_grant + self._processMap["revoke"] = Processor.process_revoke + self._on_message_begin = None + + def on_message_begin(self, func): + self._on_message_begin = func def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() + if self._on_message_begin: + self._on_message_begin(name, type, seqid) if name not in self._processMap: iprot.skip(TType.STRUCT) iprot.readMessageEnd() @@ -3447,32 +3632,6 @@ def process_isTableAvailable(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() - def 
process_isTableAvailableWithSplit(self, seqid, iprot, oprot): - args = isTableAvailableWithSplit_args() - args.read(iprot) - iprot.readMessageEnd() - result = isTableAvailableWithSplit_result() - try: - result.success = self._handler.isTableAvailableWithSplit(args.tableName, args.splitKeys) - msg_type = TMessageType.REPLY - except TTransport.TTransportException: - raise - except TIOError as io: - msg_type = TMessageType.REPLY - result.io = io - except TApplicationException as ex: - logging.exception('TApplication exception in handler') - msg_type = TMessageType.EXCEPTION - result = ex - except Exception: - logging.exception('Unexpected exception in handler') - msg_type = TMessageType.EXCEPTION - result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') - oprot.writeMessageBegin("isTableAvailableWithSplit", msg_type, seqid) - result.write(oprot) - oprot.writeMessageEnd() - oprot.trans.flush() - def process_addColumnFamily(self, seqid, iprot, oprot): args = addColumnFamily_args() args.read(iprot) @@ -3756,16 +3915,143 @@ def process_getThriftServerType(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() -# HELPER FUNCTIONS AND STRUCTURES - - -class exists_args(object): - """ - Attributes: - - table: the table to check on - - tget: the TGet to check for + def process_getClusterId(self, seqid, iprot, oprot): + args = getClusterId_args() + args.read(iprot) + iprot.readMessageEnd() + result = getClusterId_result() + try: + result.success = self._handler.getClusterId() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception('TApplication exception in handler') + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception('Unexpected exception in handler') + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getClusterId", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() - """ + def process_getSlowLogResponses(self, seqid, iprot, oprot): + args = getSlowLogResponses_args() + args.read(iprot) + iprot.readMessageEnd() + result = getSlowLogResponses_result() + try: + result.success = self._handler.getSlowLogResponses(args.serverNames, args.logQueryFilter) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TIOError as io: + msg_type = TMessageType.REPLY + result.io = io + except TApplicationException as ex: + logging.exception('TApplication exception in handler') + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception('Unexpected exception in handler') + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getSlowLogResponses", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_clearSlowLogResponses(self, seqid, iprot, oprot): + args = clearSlowLogResponses_args() + args.read(iprot) + iprot.readMessageEnd() + result = clearSlowLogResponses_result() + try: + result.success = self._handler.clearSlowLogResponses(args.serverNames) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TIOError as io: + msg_type = TMessageType.REPLY + result.io = io + except TApplicationException as ex: + logging.exception('TApplication exception in handler') + msg_type = 
TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception('Unexpected exception in handler') + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("clearSlowLogResponses", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_grant(self, seqid, iprot, oprot): + args = grant_args() + args.read(iprot) + iprot.readMessageEnd() + result = grant_result() + try: + result.success = self._handler.grant(args.info) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TIOError as io: + msg_type = TMessageType.REPLY + result.io = io + except TApplicationException as ex: + logging.exception('TApplication exception in handler') + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception('Unexpected exception in handler') + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("grant", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_revoke(self, seqid, iprot, oprot): + args = revoke_args() + args.read(iprot) + iprot.readMessageEnd() + result = revoke_result() + try: + result.success = self._handler.revoke(args.info) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TIOError as io: + msg_type = TMessageType.REPLY + result.io = io + except TApplicationException as ex: + logging.exception('TApplication exception in handler') + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception('Unexpected exception in handler') + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("revoke", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + +# HELPER FUNCTIONS AND STRUCTURES + + +class exists_args(object): + """ + Attributes: + - table: the table to check on + - tget: the TGet to check for + + """ def __init__(self, table=None, tget=None,): @@ -3867,8 +4153,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4033,8 +4318,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4190,8 +4474,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4359,8 +4642,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4508,8 +4790,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4713,8 +4994,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -4867,8 +5147,7 @@ def read(self, iprot): break if fid == 1: if 
ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5009,8 +5288,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5172,8 +5450,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5384,8 +5661,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5538,8 +5814,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5692,8 +5967,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -5845,8 +6119,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -6003,14 +6276,12 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.ia = TIllegalArgument() - self.ia.read(iprot) + self.ia = TIllegalArgument.read(iprot) else: iprot.skip(ftype) else: @@ -6150,14 +6421,12 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.ia = TIllegalArgument() - self.ia.read(iprot) + self.ia = TIllegalArgument.read(iprot) else: iprot.skip(ftype) else: @@ -6303,8 +6572,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -6470,8 +6738,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -6638,8 +6905,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -6782,8 +7048,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7007,8 +7272,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7147,8 +7411,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7300,8 +7563,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = 
TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7440,8 +7702,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7509,7 +7770,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.regex = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.regex = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: @@ -7596,8 +7857,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7666,7 +7926,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: @@ -7743,8 +8003,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7815,7 +8074,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.regex = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.regex = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: @@ -7902,8 +8161,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -7972,7 +8230,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: @@ -8049,8 +8307,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8204,8 +8461,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8332,8 +8588,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8474,8 +8729,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8602,8 +8856,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8730,8 +8983,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -8865,8 +9117,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid 
== 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9004,8 +9255,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9143,8 +9393,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9188,165 +9437,6 @@ def __ne__(self, other): ) -class isTableAvailableWithSplit_args(object): - """ - Attributes: - - tableName: the tablename to check - - splitKeys: keys to check if the table has been created with all split keys - - """ - - - def __init__(self, tableName=None, splitKeys=None,): - self.tableName = tableName - self.splitKeys = splitKeys - - def read(self, iprot): - if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: - iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 1: - if ftype == TType.STRUCT: - self.tableName = TTableName() - self.tableName.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.LIST: - self.splitKeys = [] - (_etype291, _size288) = iprot.readListBegin() - for _i292 in range(_size288): - _elem293 = iprot.readBinary() - self.splitKeys.append(_elem293) - iprot.readListEnd() - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot._fast_encode is not None and self.thrift_spec is not None: - oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) - return - oprot.writeStructBegin('isTableAvailableWithSplit_args') - if self.tableName is not None: - oprot.writeFieldBegin('tableName', TType.STRUCT, 1) - self.tableName.write(oprot) - oprot.writeFieldEnd() - if self.splitKeys is not None: - oprot.writeFieldBegin('splitKeys', TType.LIST, 2) - oprot.writeListBegin(TType.STRING, len(self.splitKeys)) - for iter294 in self.splitKeys: - oprot.writeBinary(iter294) - oprot.writeListEnd() - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - if self.tableName is None: - raise TProtocolException(message='Required field tableName is unset!') - return - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.items()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) -all_structs.append(isTableAvailableWithSplit_args) -isTableAvailableWithSplit_args.thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'tableName', [TTableName, None], None, ), # 1 - (2, TType.LIST, 'splitKeys', (TType.STRING, 'BINARY', False), None, ), # 2 -) - - -class isTableAvailableWithSplit_result(object): - """ - Attributes: - - success - - io - - """ - - - def __init__(self, success=None, io=None,): - self.success = success - self.io = io - - def read(self, iprot): - if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: - iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot._fast_encode is not None and self.thrift_spec is not None: - oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) - return - oprot.writeStructBegin('isTableAvailableWithSplit_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) - oprot.writeFieldEnd() - if self.io is not None: - oprot.writeFieldBegin('io', TType.STRUCT, 1) - self.io.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.items()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) -all_structs.append(isTableAvailableWithSplit_result) -isTableAvailableWithSplit_result.thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1 -) - - class addColumnFamily_args(object): """ Attributes: @@ -9449,8 +9539,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9591,8 +9680,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9734,8 +9822,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9862,8 +9949,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -9990,8 +10076,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10118,8 +10203,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10181,7 +10265,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: @@ -10245,8 +10329,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10308,7 +10391,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + 
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: @@ -10380,8 +10463,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10493,18 +10575,17 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype298, _size295) = iprot.readListBegin() - for _i299 in range(_size295): - _elem300 = TNamespaceDescriptor() - _elem300.read(iprot) - self.success.append(_elem300) + (_etype291, _size288) = iprot.readListBegin() + for _i292 in range(_size288): + _elem293 = TNamespaceDescriptor() + _elem293.read(iprot) + self.success.append(_elem293) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10520,8 +10601,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter301 in self.success: - iter301.write(oprot) + for iter294 in self.success: + iter294.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: @@ -10619,17 +10700,16 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype305, _size302) = iprot.readListBegin() - for _i306 in range(_size302): - _elem307 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem307) + (_etype298, _size295) = iprot.readListBegin() + for _i299 in range(_size295): + _elem300 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem300) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.io = TIOError() - self.io.read(iprot) + self.io = TIOError.read(iprot) else: iprot.skip(ftype) else: @@ -10645,8 +10725,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter308 in self.success: - oprot.writeString(iter308.encode('utf-8') if sys.version_info[0] == 2 else iter308) + for iter301 in self.success: + oprot.writeString(iter301.encode('utf-8') if sys.version_info[0] == 2 else iter301) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: @@ -10778,5 +10858,703 @@ def __ne__(self, other): getThriftServerType_result.thrift_spec = ( (0, TType.I32, 'success', None, None, ), # 0 ) + + +class getClusterId_args(object): + + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('getClusterId_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, 
value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(getClusterId_args) +getClusterId_args.thrift_spec = ( +) + + +class getClusterId_result(object): + """ + Attributes: + - success + + """ + + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('getClusterId_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(getClusterId_result) +getClusterId_result.thrift_spec = ( + (0, TType.STRING, 'success', 'UTF8', None, ), # 0 +) + + +class getSlowLogResponses_args(object): + """ + Attributes: + - serverNames: @param serverNames Server names to get slowlog responses from + - logQueryFilter: @param logQueryFilter filter to be used if provided + + """ + + + def __init__(self, serverNames=None, logQueryFilter=None,): + self.serverNames = serverNames + self.logQueryFilter = logQueryFilter + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.SET: + self.serverNames = set() + (_etype305, _size302) = iprot.readSetBegin() + for _i306 in range(_size302): + _elem307 = TServerName() + _elem307.read(iprot) + self.serverNames.add(_elem307) + iprot.readSetEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.logQueryFilter = TLogQueryFilter() + self.logQueryFilter.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin('getSlowLogResponses_args') + if self.serverNames is not None: + oprot.writeFieldBegin('serverNames', TType.SET, 1) + oprot.writeSetBegin(TType.STRUCT, len(self.serverNames)) + for iter308 in self.serverNames: + iter308.write(oprot) + oprot.writeSetEnd() + oprot.writeFieldEnd() + if self.logQueryFilter is not None: + oprot.writeFieldBegin('logQueryFilter', TType.STRUCT, 2) + self.logQueryFilter.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(getSlowLogResponses_args) +getSlowLogResponses_args.thrift_spec = ( + None, # 0 + (1, TType.SET, 'serverNames', (TType.STRUCT, [TServerName, None], False), None, ), # 1 + (2, TType.STRUCT, 'logQueryFilter', [TLogQueryFilter, None], None, ), # 2 +) + + +class getSlowLogResponses_result(object): + """ + Attributes: + - success + - io + + """ + + + def __init__(self, success=None, io=None,): + self.success = success + self.io = io + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype312, _size309) = iprot.readListBegin() + for _i313 in range(_size309): + _elem314 = TOnlineLogRecord() + _elem314.read(iprot) + self.success.append(_elem314) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.io = TIOError.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('getSlowLogResponses_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter315 in self.success: + iter315.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.io is not None: + oprot.writeFieldBegin('io', TType.STRUCT, 1) + self.io.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(getSlowLogResponses_result) +getSlowLogResponses_result.thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT, [TOnlineLogRecord, None], False), None, ), # 0 + (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1 +) + + +class clearSlowLogResponses_args(object): + """ + Attributes: + - serverNames: @param serverNames Set of Server names to clean slowlog responses from + + """ + + + def 
__init__(self, serverNames=None,): + self.serverNames = serverNames + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.SET: + self.serverNames = set() + (_etype319, _size316) = iprot.readSetBegin() + for _i320 in range(_size316): + _elem321 = TServerName() + _elem321.read(iprot) + self.serverNames.add(_elem321) + iprot.readSetEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('clearSlowLogResponses_args') + if self.serverNames is not None: + oprot.writeFieldBegin('serverNames', TType.SET, 1) + oprot.writeSetBegin(TType.STRUCT, len(self.serverNames)) + for iter322 in self.serverNames: + iter322.write(oprot) + oprot.writeSetEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(clearSlowLogResponses_args) +clearSlowLogResponses_args.thrift_spec = ( + None, # 0 + (1, TType.SET, 'serverNames', (TType.STRUCT, [TServerName, None], False), None, ), # 1 +) + + +class clearSlowLogResponses_result(object): + """ + Attributes: + - success + - io + + """ + + + def __init__(self, success=None, io=None,): + self.success = success + self.io = io + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype326, _size323) = iprot.readListBegin() + for _i327 in range(_size323): + _elem328 = iprot.readBool() + self.success.append(_elem328) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.io = TIOError.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('clearSlowLogResponses_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.BOOL, len(self.success)) + for iter329 in self.success: + oprot.writeBool(iter329) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.io is not None: + oprot.writeFieldBegin('io', TType.STRUCT, 1) + self.io.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def 
__repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(clearSlowLogResponses_result) +clearSlowLogResponses_result.thrift_spec = ( + (0, TType.LIST, 'success', (TType.BOOL, None, False), None, ), # 0 + (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1 +) + + +class grant_args(object): + """ + Attributes: + - info + + """ + + + def __init__(self, info=None,): + self.info = info + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.info = TAccessControlEntity() + self.info.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('grant_args') + if self.info is not None: + oprot.writeFieldBegin('info', TType.STRUCT, 1) + self.info.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.info is None: + raise TProtocolException(message='Required field info is unset!') + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(grant_args) +grant_args.thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'info', [TAccessControlEntity, None], None, ), # 1 +) + + +class grant_result(object): + """ + Attributes: + - success + - io + + """ + + + def __init__(self, success=None, io=None,): + self.success = success + self.io = io + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.io = TIOError.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('grant_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.io is not None: + oprot.writeFieldBegin('io', TType.STRUCT, 1) + self.io.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(grant_result) +grant_result.thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1 +) + + +class revoke_args(object): + """ + Attributes: + - info + + """ + + + def __init__(self, info=None,): + self.info = info + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.info = TAccessControlEntity() + self.info.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('revoke_args') + if self.info is not None: + oprot.writeFieldBegin('info', TType.STRUCT, 1) + self.info.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.info is None: + raise TProtocolException(message='Required field info is unset!') + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(revoke_args) +revoke_args.thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'info', [TAccessControlEntity, None], None, ), # 1 +) + + +class revoke_result(object): + """ + Attributes: + - success + - io + + """ + + + def __init__(self, success=None, io=None,): + self.success = success + self.io = io + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.io = TIOError.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('revoke_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.io is not None: + oprot.writeFieldBegin('io', TType.STRUCT, 1) + self.io.write(oprot) + oprot.writeFieldEnd() + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) +all_structs.append(revoke_result) +revoke_result.thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'io', [TIOError, None], None, ), # 1 +) fix_spec(all_structs) del all_structs diff --git a/hbase-examples/src/main/python/thrift2/gen-py/hbase/constants.py b/hbase-examples/src/main/python/thrift2/gen-py/hbase/constants.py index c59352d09f18..69c181ade385 100644 --- a/hbase-examples/src/main/python/thrift2/gen-py/hbase/constants.py +++ b/hbase-examples/src/main/python/thrift2/gen-py/hbase/constants.py @@ -1,5 +1,5 @@ # -# Autogenerated by Thrift Compiler (0.12.0) +# Autogenerated by Thrift Compiler (0.14.1) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # diff --git a/hbase-examples/src/main/python/thrift2/gen-py/hbase/ttypes.py b/hbase-examples/src/main/python/thrift2/gen-py/hbase/ttypes.py index 80b268f84f6d..797ae9ea0b7a 100644 --- a/hbase-examples/src/main/python/thrift2/gen-py/hbase/ttypes.py +++ b/hbase-examples/src/main/python/thrift2/gen-py/hbase/ttypes.py @@ -1,5 +1,5 @@ # -# Autogenerated by Thrift Compiler (0.12.0) +# Autogenerated by Thrift Compiler (0.14.1) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # @@ -262,6 +262,36 @@ class TKeepDeletedCells(object): } +class TLogType(object): + SLOW_LOG = 1 + LARGE_LOG = 2 + + _VALUES_TO_NAMES = { + 1: "SLOW_LOG", + 2: "LARGE_LOG", + } + + _NAMES_TO_VALUES = { + "SLOW_LOG": 1, + "LARGE_LOG": 2, + } + + +class TFilterByOperator(object): + AND = 0 + OR = 1 + + _VALUES_TO_NAMES = { + 0: "AND", + 1: "OR", + } + + _NAMES_TO_VALUES = { + "AND": 0, + "OR": 1, + } + + class TThriftServerType(object): """ Specify type of thrift server: thrift and thrift2 @@ -281,6 +311,21 @@ class TThriftServerType(object): } +class TPermissionScope(object): + TABLE = 0 + NAMESPACE = 1 + + _VALUES_TO_NAMES = { + 0: "TABLE", + 1: "NAMESPACE", + } + + _NAMES_TO_VALUES = { + "TABLE": 0, + "NAMESPACE": 1, + } + + class TTimeRange(object): """ Attributes: @@ -771,7 +816,7 @@ def read(self, iprot): self.labels = [] (_etype10, _size7) = iprot.readListBegin() for _i11 in range(_size7): - _elem12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + _elem12 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.labels.append(_elem12) iprot.readListEnd() else: @@ -833,7 +878,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.expression = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.expression = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: @@ -2284,7 +2329,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.hostName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.hostName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: @@ -2568,8 +2613,8 @@ def read(self, iprot): 
self.configuration = {} (_ktype134, _vtype135, _size133) = iprot.readMapBegin() for _i137 in range(_size133): - _key138 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() - _val139 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + _key138 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val139 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.configuration[_key138] = _val139 iprot.readMapEnd() else: @@ -2919,7 +2964,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: @@ -2927,8 +2972,8 @@ def read(self, iprot): self.configuration = {} (_ktype161, _vtype162, _size160) = iprot.readMapBegin() for _i164 in range(_size160): - _key165 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() - _val166 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + _key165 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val166 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.configuration[_key165] = _val166 iprot.readMapEnd() else: @@ -2975,6 +3020,383 @@ def __ne__(self, other): return not (self == other) +class TLogQueryFilter(object): + """ + Thrift wrapper around + org.apache.hadoop.hbase.client.LogQueryFilter + + Attributes: + - regionName + - clientAddress + - tableName + - userName + - limit + - logType + - filterByOperator + + """ + + + def __init__(self, regionName=None, clientAddress=None, tableName=None, userName=None, limit=10, logType=1, filterByOperator=1,): + self.regionName = regionName + self.clientAddress = clientAddress + self.tableName = tableName + self.userName = userName + self.limit = limit + self.logType = logType + self.filterByOperator = filterByOperator + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.regionName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.clientAddress = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.userName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.limit = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: 
+ self.logType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.filterByOperator = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('TLogQueryFilter') + if self.regionName is not None: + oprot.writeFieldBegin('regionName', TType.STRING, 1) + oprot.writeString(self.regionName.encode('utf-8') if sys.version_info[0] == 2 else self.regionName) + oprot.writeFieldEnd() + if self.clientAddress is not None: + oprot.writeFieldBegin('clientAddress', TType.STRING, 2) + oprot.writeString(self.clientAddress.encode('utf-8') if sys.version_info[0] == 2 else self.clientAddress) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 3) + oprot.writeString(self.tableName.encode('utf-8') if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.userName is not None: + oprot.writeFieldBegin('userName', TType.STRING, 4) + oprot.writeString(self.userName.encode('utf-8') if sys.version_info[0] == 2 else self.userName) + oprot.writeFieldEnd() + if self.limit is not None: + oprot.writeFieldBegin('limit', TType.I32, 5) + oprot.writeI32(self.limit) + oprot.writeFieldEnd() + if self.logType is not None: + oprot.writeFieldBegin('logType', TType.I32, 6) + oprot.writeI32(self.logType) + oprot.writeFieldEnd() + if self.filterByOperator is not None: + oprot.writeFieldBegin('filterByOperator', TType.I32, 7) + oprot.writeI32(self.filterByOperator) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TOnlineLogRecord(object): + """ + Thrift wrapper around + org.apache.hadoop.hbase.client.OnlineLogRecord + + Attributes: + - startTime + - processingTime + - queueTime + - responseSize + - clientAddress + - serverClass + - methodName + - callDetails + - param + - userName + - multiGetsCount + - multiMutationsCount + - multiServiceCalls + - regionName + - blockBytesScanned + - fsReadTime + + """ + + + def __init__(self, startTime=None, processingTime=None, queueTime=None, responseSize=None, clientAddress=None, serverClass=None, methodName=None, callDetails=None, param=None, userName=None, multiGetsCount=None, multiMutationsCount=None, multiServiceCalls=None, regionName=None, blockBytesScanned=None, fsReadTime=None,): + self.startTime = startTime + self.processingTime = processingTime + self.queueTime = queueTime + self.responseSize = responseSize + self.clientAddress = clientAddress + self.serverClass = serverClass + self.methodName = methodName + self.callDetails = callDetails + self.param = param + self.userName = userName + self.multiGetsCount = multiGetsCount + self.multiMutationsCount = multiMutationsCount + self.multiServiceCalls = multiServiceCalls + self.regionName = regionName + self.blockBytesScanned = blockBytesScanned + self.fsReadTime = fsReadTime + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.startTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.processingTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.queueTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.responseSize = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.clientAddress = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.serverClass = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.methodName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.callDetails = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.param = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.userName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.I32: + self.multiGetsCount = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.I32: + self.multiMutationsCount = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.I32: + self.multiServiceCalls = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.STRING: + self.regionName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.blockBytesScanned = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.I64: + self.fsReadTime = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('TOnlineLogRecord') + if self.startTime is not None: + oprot.writeFieldBegin('startTime', TType.I64, 1) + oprot.writeI64(self.startTime) + oprot.writeFieldEnd() + if self.processingTime is not None: + oprot.writeFieldBegin('processingTime', TType.I32, 2) + oprot.writeI32(self.processingTime) + oprot.writeFieldEnd() + if self.queueTime is not None: + oprot.writeFieldBegin('queueTime', TType.I32, 3) + oprot.writeI32(self.queueTime) + oprot.writeFieldEnd() + if self.responseSize is not None: + oprot.writeFieldBegin('responseSize', TType.I64, 4) + oprot.writeI64(self.responseSize) + oprot.writeFieldEnd() + if 
self.clientAddress is not None: + oprot.writeFieldBegin('clientAddress', TType.STRING, 5) + oprot.writeString(self.clientAddress.encode('utf-8') if sys.version_info[0] == 2 else self.clientAddress) + oprot.writeFieldEnd() + if self.serverClass is not None: + oprot.writeFieldBegin('serverClass', TType.STRING, 6) + oprot.writeString(self.serverClass.encode('utf-8') if sys.version_info[0] == 2 else self.serverClass) + oprot.writeFieldEnd() + if self.methodName is not None: + oprot.writeFieldBegin('methodName', TType.STRING, 7) + oprot.writeString(self.methodName.encode('utf-8') if sys.version_info[0] == 2 else self.methodName) + oprot.writeFieldEnd() + if self.callDetails is not None: + oprot.writeFieldBegin('callDetails', TType.STRING, 8) + oprot.writeString(self.callDetails.encode('utf-8') if sys.version_info[0] == 2 else self.callDetails) + oprot.writeFieldEnd() + if self.param is not None: + oprot.writeFieldBegin('param', TType.STRING, 9) + oprot.writeString(self.param.encode('utf-8') if sys.version_info[0] == 2 else self.param) + oprot.writeFieldEnd() + if self.userName is not None: + oprot.writeFieldBegin('userName', TType.STRING, 10) + oprot.writeString(self.userName.encode('utf-8') if sys.version_info[0] == 2 else self.userName) + oprot.writeFieldEnd() + if self.multiGetsCount is not None: + oprot.writeFieldBegin('multiGetsCount', TType.I32, 11) + oprot.writeI32(self.multiGetsCount) + oprot.writeFieldEnd() + if self.multiMutationsCount is not None: + oprot.writeFieldBegin('multiMutationsCount', TType.I32, 12) + oprot.writeI32(self.multiMutationsCount) + oprot.writeFieldEnd() + if self.multiServiceCalls is not None: + oprot.writeFieldBegin('multiServiceCalls', TType.I32, 13) + oprot.writeI32(self.multiServiceCalls) + oprot.writeFieldEnd() + if self.regionName is not None: + oprot.writeFieldBegin('regionName', TType.STRING, 14) + oprot.writeString(self.regionName.encode('utf-8') if sys.version_info[0] == 2 else self.regionName) + oprot.writeFieldEnd() + if self.blockBytesScanned is not None: + oprot.writeFieldBegin('blockBytesScanned', TType.I64, 15) + oprot.writeI64(self.blockBytesScanned) + oprot.writeFieldEnd() + if self.fsReadTime is not None: + oprot.writeFieldBegin('fsReadTime', TType.I64, 16) + oprot.writeI64(self.fsReadTime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.startTime is None: + raise TProtocolException(message='Required field startTime is unset!') + if self.processingTime is None: + raise TProtocolException(message='Required field processingTime is unset!') + if self.queueTime is None: + raise TProtocolException(message='Required field queueTime is unset!') + if self.responseSize is None: + raise TProtocolException(message='Required field responseSize is unset!') + if self.clientAddress is None: + raise TProtocolException(message='Required field clientAddress is unset!') + if self.serverClass is None: + raise TProtocolException(message='Required field serverClass is unset!') + if self.methodName is None: + raise TProtocolException(message='Required field methodName is unset!') + if self.callDetails is None: + raise TProtocolException(message='Required field callDetails is unset!') + if self.param is None: + raise TProtocolException(message='Required field param is unset!') + if self.userName is None: + raise TProtocolException(message='Required field userName is unset!') + if self.multiGetsCount is None: + raise TProtocolException(message='Required field multiGetsCount is unset!') + if 
self.multiMutationsCount is None: + raise TProtocolException(message='Required field multiMutationsCount is unset!') + if self.multiServiceCalls is None: + raise TProtocolException(message='Required field multiServiceCalls is unset!') + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + class TIOError(TException): """ A TIOError exception signals that an error occurred communicating @@ -2983,31 +3405,53 @@ class TIOError(TException): Attributes: - message + - canRetry """ - def __init__(self, message=None,): - self.message = message + def __init__(self, message=None, canRetry=None,): + super(TIOError, self).__setattr__('message', message) + super(TIOError, self).__setattr__('canRetry', canRetry) - def read(self, iprot): - if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: - iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) - return + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message, self.canRetry, )) + + @classmethod + def read(cls, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None: + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) iprot.readStructBegin() + message = None + canRetry = None while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: - self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + canRetry = iprot.readBool() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() + return cls( + message=message, + canRetry=canRetry, + ) def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: @@ -3018,6 +3462,10 @@ def write(self, oprot): oprot.writeFieldBegin('message', TType.STRING, 1) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() + if self.canRetry is not None: + oprot.writeFieldBegin('canRetry', TType.BOOL, 2) + oprot.writeBool(self.canRetry) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3051,26 +3499,39 @@ class TIllegalArgument(TException): def __init__(self, message=None,): - self.message = message + super(TIllegalArgument, self).__setattr__('message', message) - def read(self, iprot): - if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: - iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) - return + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message, )) + + @classmethod + 
def read(cls, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None: + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) iprot.readStructBegin() + message = None while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: - self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() + message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() + return cls( + message=message, + ) def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: @@ -3100,6 +3561,115 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + + +class TAccessControlEntity(object): + """ + TAccessControlEntity for permission control + + Attributes: + - username + - scope + - actions + - tableName + - nsName + + """ + + + def __init__(self, username=None, scope=None, actions=None, tableName=None, nsName=None,): + self.username = username + self.scope = scope + self.actions = actions + self.tableName = tableName + self.nsName = nsName + + def read(self, iprot): + if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.username = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.scope = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.actions = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.tableName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.nsName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin('TAccessControlEntity') + if self.username is not None: + oprot.writeFieldBegin('username', TType.STRING, 1) + oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username) + oprot.writeFieldEnd() + if self.scope is not None: + oprot.writeFieldBegin('scope', TType.I32, 2) + oprot.writeI32(self.scope) + oprot.writeFieldEnd() + if self.actions is not None: + oprot.writeFieldBegin('actions', TType.STRING, 4) + oprot.writeString(self.actions.encode('utf-8') if sys.version_info[0] == 2 else self.actions) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 5) + oprot.writeString(self.tableName.encode('utf-8') if 
sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.nsName is not None: + oprot.writeFieldBegin('nsName', TType.STRING, 6) + oprot.writeString(self.nsName.encode('utf-8') if sys.version_info[0] == 2 else self.nsName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.username is None: + raise TProtocolException(message='Required field username is unset!') + if self.scope is None: + raise TProtocolException(message='Required field scope is unset!') + if self.actions is None: + raise TProtocolException(message='Required field actions is unset!') + return + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) all_structs.append(TTimeRange) TTimeRange.thrift_spec = ( None, # 0 @@ -3312,15 +3882,57 @@ def __ne__(self, other): (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.MAP, 'configuration', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 ) +all_structs.append(TLogQueryFilter) +TLogQueryFilter.thrift_spec = ( + None, # 0 + (1, TType.STRING, 'regionName', 'UTF8', None, ), # 1 + (2, TType.STRING, 'clientAddress', 'UTF8', None, ), # 2 + (3, TType.STRING, 'tableName', 'UTF8', None, ), # 3 + (4, TType.STRING, 'userName', 'UTF8', None, ), # 4 + (5, TType.I32, 'limit', None, 10, ), # 5 + (6, TType.I32, 'logType', None, 1, ), # 6 + (7, TType.I32, 'filterByOperator', None, 1, ), # 7 +) +all_structs.append(TOnlineLogRecord) +TOnlineLogRecord.thrift_spec = ( + None, # 0 + (1, TType.I64, 'startTime', None, None, ), # 1 + (2, TType.I32, 'processingTime', None, None, ), # 2 + (3, TType.I32, 'queueTime', None, None, ), # 3 + (4, TType.I64, 'responseSize', None, None, ), # 4 + (5, TType.STRING, 'clientAddress', 'UTF8', None, ), # 5 + (6, TType.STRING, 'serverClass', 'UTF8', None, ), # 6 + (7, TType.STRING, 'methodName', 'UTF8', None, ), # 7 + (8, TType.STRING, 'callDetails', 'UTF8', None, ), # 8 + (9, TType.STRING, 'param', 'UTF8', None, ), # 9 + (10, TType.STRING, 'userName', 'UTF8', None, ), # 10 + (11, TType.I32, 'multiGetsCount', None, None, ), # 11 + (12, TType.I32, 'multiMutationsCount', None, None, ), # 12 + (13, TType.I32, 'multiServiceCalls', None, None, ), # 13 + (14, TType.STRING, 'regionName', 'UTF8', None, ), # 14 + (15, TType.I64, 'blockBytesScanned', None, None, ), # 15 + (16, TType.I64, 'fsReadTime', None, None, ), # 16 +) all_structs.append(TIOError) TIOError.thrift_spec = ( None, # 0 (1, TType.STRING, 'message', 'UTF8', None, ), # 1 + (2, TType.BOOL, 'canRetry', None, None, ), # 2 ) all_structs.append(TIllegalArgument) TIllegalArgument.thrift_spec = ( None, # 0 (1, TType.STRING, 'message', 'UTF8', None, ), # 1 ) +all_structs.append(TAccessControlEntity) +TAccessControlEntity.thrift_spec = ( + None, # 0 + (1, TType.STRING, 'username', 'UTF8', None, ), # 1 + (2, TType.I32, 'scope', None, None, ), # 2 + None, # 3 + (4, TType.STRING, 'actions', 'UTF8', None, ), # 4 + (5, TType.STRING, 'tableName', 'UTF8', None, ), # 5 + (6, TType.STRING, 'nsName', 'UTF8', None, ), # 6 +) fix_spec(all_structs) del all_structs diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index 
612a3ce50083..0fdf62bd90e4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -11,7 +11,7 @@ * An AlreadyExists exceptions signals that a table with the specified * name already exists */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class AlreadyExists extends org.apache.thrift.TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExists"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java index e1ec71d12549..dde17768b4c0 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java @@ -10,7 +10,7 @@ /** * A BatchMutation object is used to apply a number of Mutations to a single row. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class BatchMutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BatchMutation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index 58cdc9db506c..39afd8f86430 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -12,7 +12,7 @@ * such as the number of versions, compression settings, etc. It is * used as input when creating a table or adding a column. 
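Throughout the regenerated Python bindings above, every readString() call now decodes with errors='replace' on Python 2 instead of a bare decode('utf-8'). A minimal illustration of the difference (the snippet runs under either Python version, though the generated code only takes this branch on Python 2):

raw = b'col \xff qualifier'                     # bytes that are not valid UTF-8
print(raw.decode('utf-8', errors='replace'))    # 'col \ufffd qualifier' -- undecodable bytes become U+FFFD
try:
    raw.decode('utf-8')                         # the previous behaviour: raise instead of replace
except UnicodeDecodeError as exc:
    print(exc)
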
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class ColumnDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnDescriptor"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index 43bc7fb60118..5416e4402e54 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-10-05") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class Hbase { public interface Iface { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index f5f6b565c56c..b0d4f2d327b5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -12,7 +12,7 @@ * to the Hbase master or an Hbase region server. Also used to return * more general Hbase error conditions. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class IOError extends org.apache.thrift.TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IOError"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java index a0e2e97827a3..2add59b67732 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java @@ -11,7 +11,7 @@ * An IllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class IllegalArgument extends org.apache.thrift.TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IllegalArgument"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java index 7bf919fda33e..a5d2ce540a0c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java @@ -10,7 +10,7 @@ /** * A Mutation object is used to either update or delete a column-value. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class Mutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Mutation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java index 24fcb0586a24..7cb35644b496 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java @@ -10,7 +10,7 @@ /** * TAccessControlEntity for permission control */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TAccessControlEntity implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAccessControlEntity"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java index 3d0333ea30f6..9a7b7103f36d 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java @@ -10,7 +10,7 @@ /** * An Append object is used to specify the parameters for performing the append operation. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java index fe6ccf21d416..eab44eb300e8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java @@ -13,7 +13,7 @@ * the timestamp of a cell to a first-class value, making it easy to take * note of temporal data. Cell is used all the way from HStore up to HTable. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TCell implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCell"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java index 8f486104691b..323f94a888ef 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java @@ -10,7 +10,7 @@ /** * Holds column name and the cell. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java index ee1fdd1d0573..7868a294c097 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java @@ -11,7 +11,7 @@ * For increments that are not incrementColumnValue * equivalents. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java index dc31e774d4b5..e2be41d9b1c2 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift.generated; -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TPermissionScope implements org.apache.thrift.TEnum { TABLE(0), NAMESPACE(1); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index fdfb11aa8c87..e38d0ae312b8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -10,7 +10,7 @@ /** * A TRegionInfo contains information about an HTable region. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRegionInfo"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java index d3959114e728..9f65146ac322 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java @@ -10,7 +10,7 @@ /** * Holds row name and then a map of columns to cells. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TRowResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowResult"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java index ec486accf3ba..3d60b6b58b1e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java @@ -10,7 +10,7 @@ /** * A Scan object is used to specify scanner parameters when opening a scanner. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java index 17bdd3e4e40a..8406bde49a44 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java @@ -10,7 +10,7 @@ /** * Specify type of thrift server: thrift and thrift2 */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TThriftServerType implements org.apache.thrift.TEnum { ONE(1), TWO(2); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index a20c7cda09c9..1397bc49b2aa 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -55,7 +55,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; -import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionLocation; @@ -723,12 +722,6 @@ public boolean isTableAvailable(TTableName tableName) throws TIOError, TExceptio } } - @Override - public boolean isTableAvailableWithSplit(TTableName tableName, List splitKeys) - throws TIOError, TException { - throw new NotImplementedException("isTableAvailableWithSplit not supported"); - } - @Override public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, TException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java index b68d7acd7a3d..e7234c0b85ea 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java @@ -10,7 +10,7 @@ /** * TAccessControlEntity for permission control */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TAccessControlEntity implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAccessControlEntity"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java index 33ccfd5cc8f8..f68b9cb2931f 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java index 7962bfa8c26d..9696bbf3e5dc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TAuthorization implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAuthorization"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java index 35bcfd58f8a3..f9e9ec5bf6ba 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.regionserver.BloomType */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = 
"2024-05-08") public enum TBloomFilterType implements org.apache.thrift.TEnum { /** * Bloomfilters disabled diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java index 7a29bd7596ad..c142b2cb158e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TCellVisibility implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCellVisibility"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java index 90f7cdec2204..83aa7b5dd6be 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java @@ -12,7 +12,7 @@ * in a HBase table by column family and optionally * a column qualifier and timestamp */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java index 0c48ba68effb..10258ba5ecce 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.client.ColumnFamilyDescriptor */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TColumnFamilyDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnFamilyDescriptor"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java index 2fb514d3a127..72099a51d551 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java @@ -10,7 +10,7 @@ /** * Represents a single cell 
and the amount to increment it by */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TColumnIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java index a30487aac56d..070133a372fc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java @@ -10,7 +10,7 @@ /** * Represents a single cell and its value. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java index 6c749d587869..38d888a772c5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.CompareOperator. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TCompareOperator implements org.apache.thrift.TEnum { LESS(0), LESS_OR_EQUAL(1), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java index e4deb1078832..d586c9f8e9a0 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.io.compress.Algorithm */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TCompressionAlgorithm implements org.apache.thrift.TEnum { LZO(0), GZ(1), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java index 17b6d2bc0eba..5194e631bbb6 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java @@ -12,7 +12,7 @@ * - STRONG means reads only from primary region * - TIMELINE means reads might return values from secondary region replicas */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TConsistency implements org.apache.thrift.TEnum { STRONG(1), TIMELINE(2); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java index c3c7429f024a..352abd30d8b5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.io.encoding.DataBlockEncoding */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TDataBlockEncoding implements org.apache.thrift.TEnum { /** * Disable data block encoding. diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java index bb217d13f96d..031e11a7c0bf 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java @@ -33,7 +33,7 @@ * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TDelete implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDelete"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java index 3ccf01ea4300..abec1f8b6db9 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java @@ -12,7 +12,7 @@ * - DELETE_COLUMN means exactly one version will be removed, * - DELETE_COLUMNS means previous versions will also be removed. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TDeleteType implements org.apache.thrift.TEnum { DELETE_COLUMN(0), DELETE_COLUMNS(1), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java index 638d440c01c9..7ff35002b4c7 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java @@ -14,7 +14,7 @@ * - SYNC_WAL means write the Mutation to the WAL synchronously, * - FSYNC_WAL means Write the Mutation to the WAL synchronously and force the entries to disk. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TDurability implements org.apache.thrift.TEnum { USE_DEFAULT(0), SKIP_WAL(1), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java index 61ee2f6de513..f50fade73f1f 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TFilterByOperator implements org.apache.thrift.TEnum { AND(0), OR(1); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java index dfecde9ffb22..d8a21b33beb0 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java @@ -20,7 +20,7 @@ * If you specify a time range and a timestamp the range is ignored. * Timestamps on TColumns are ignored. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TGet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGet"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java index 0fde1dbf4753..dea2991b4a93 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class THBaseService { public interface Iface { @@ -395,24 +395,6 @@ public interface Iface { */ public boolean isTableAvailable(TTableName tableName) throws TIOError, org.apache.thrift.TException; - /** - * * Use this api to check if the table has been created with the specified number of splitkeys - * * which was used while creating the given table. Note : If this api is used after a table's - * * region gets splitted, the api may return false. - * * - * * @return true if table is available, false if not - * * - * * @deprecated Since 2.2.0. Because the same method in Table interface has been deprecated - * * since 2.0.0, we will remove it in 3.0.0 release. - * * Use {@link #isTableAvailable(TTableName tableName)} instead - * * - * - * @param tableName the tablename to check - * - * @param splitKeys keys to check if the table has been created with all split keys - */ - public boolean isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys) throws TIOError, org.apache.thrift.TException; - /** * Add a column family to an existing table. Synchronous operation. 
* @@ -625,8 +607,6 @@ public interface AsyncIface { public void isTableAvailable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1604,33 +1584,6 @@ public boolean recv_isTableAvailable() throws TIOError, org.apache.thrift.TExcep throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableAvailable failed: unknown result"); } - public boolean isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys) throws TIOError, org.apache.thrift.TException - { - send_isTableAvailableWithSplit(tableName, splitKeys); - return recv_isTableAvailableWithSplit(); - } - - public void send_isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys) throws org.apache.thrift.TException - { - isTableAvailableWithSplit_args args = new isTableAvailableWithSplit_args(); - args.setTableName(tableName); - args.setSplitKeys(splitKeys); - sendBase("isTableAvailableWithSplit", args); - } - - public boolean recv_isTableAvailableWithSplit() throws TIOError, org.apache.thrift.TException - { - isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result(); - receiveBase(result, "isTableAvailableWithSplit"); - if (result.isSetSuccess()) { - return result.success; - } - if (result.io != null) { - throw result.io; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableAvailableWithSplit failed: unknown result"); - } - public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException { send_addColumnFamily(tableName, column); @@ -3269,41 +3222,6 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti } } - public void isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - isTableAvailableWithSplit_call method_call = new isTableAvailableWithSplit_call(tableName, splitKeys, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class isTableAvailableWithSplit_call extends org.apache.thrift.async.TAsyncMethodCall { - private TTableName tableName; - private java.util.List splitKeys; - public isTableAvailableWithSplit_call(TTableName tableName, java.util.List splitKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.tableName = tableName; - this.splitKeys = splitKeys; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableAvailableWithSplit", org.apache.thrift.protocol.TMessageType.CALL, 0)); - isTableAvailableWithSplit_args args = new isTableAvailableWithSplit_args(); - args.setTableName(tableName); - args.setSplitKeys(splitKeys); - args.write(prot); - prot.writeMessageEnd(); - } - - public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new java.lang.IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_isTableAvailableWithSplit(); - } - } - public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); addColumnFamily_call method_call = new addColumnFamily_call(tableName, column, resultHandler, this, ___protocolFactory, ___transport); @@ -3864,7 +3782,6 @@ protected Processor(I iface, java.util.Map extends org.apache.thrift.ProcessFunction { - public isTableAvailableWithSplit() { - super("isTableAvailableWithSplit"); - } - - public isTableAvailableWithSplit_args getEmptyArgsInstance() { - return new isTableAvailableWithSplit_args(); - } - - protected boolean isOneway() { - return false; - } - - @Override - protected boolean rethrowUnhandledExceptions() { - return false; - } - - public isTableAvailableWithSplit_result getResult(I iface, isTableAvailableWithSplit_args args) throws org.apache.thrift.TException { - isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result(); - try { - result.success = iface.isTableAvailableWithSplit(args.tableName, args.splitKeys); - result.setSuccessIsSet(true); - } catch (TIOError io) { - result.io = io; - } - return result; - } - } - public static class addColumnFamily extends org.apache.thrift.ProcessFunction { public addColumnFamily() { super("addColumnFamily"); @@ -5448,7 +5335,6 @@ protected AsyncProcessor(I iface, java.util.Map extends org.apache.thrift.AsyncProcessFunction { - public isTableAvailableWithSplit() { - super("isTableAvailableWithSplit"); - } - - public isTableAvailableWithSplit_args getEmptyArgsInstance() { - return new isTableAvailableWithSplit_args(); - } - - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(java.lang.Boolean o) { - isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result(); - result.success = o; - result.setSuccessIsSet(true); - try { - fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - } catch (org.apache.thrift.transport.TTransportException e) { - _LOGGER.error("TTransportException writing to internal frame buffer", e); - fb.close(); - } catch (java.lang.Exception e) { - _LOGGER.error("Exception writing to internal frame buffer", e); - onError(e); - } - } - public void onError(java.lang.Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; 
- org.apache.thrift.TSerializable msg; - isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result(); - if (e instanceof TIOError) { - result.io = (TIOError) e; - result.setIoIsSet(true); - msg = result; - } else if (e instanceof org.apache.thrift.transport.TTransportException) { - _LOGGER.error("TTransportException inside handler", e); - fb.close(); - return; - } else if (e instanceof org.apache.thrift.TApplicationException) { - _LOGGER.error("TApplicationException inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TApplicationException)e; - } else { - _LOGGER.error("Exception inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - } catch (java.lang.Exception ex) { - _LOGGER.error("Exception writing to internal frame buffer", ex); - fb.close(); - } - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, isTableAvailableWithSplit_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.isTableAvailableWithSplit(args.tableName, args.splitKeys,resultHandler); - } - } - public static class addColumnFamily extends org.apache.thrift.AsyncProcessFunction { public addColumnFamily() { super("addColumnFamily"); @@ -43143,34 +42963,34 @@ private static S scheme(org.apache. } } - public static class isTableAvailableWithSplit_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_args"); + public static class addColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_args"); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField SPLIT_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("splitKeys", org.apache.thrift.protocol.TType.LIST, (short)2); + private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailableWithSplit_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailableWithSplit_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_argsTupleSchemeFactory(); /** - * the tablename to check + * the tablename to add column family to */ public @org.apache.thrift.annotation.Nullable TTableName tableName; // required /** - * keys to check if the table has been created with all split keys + * column family descriptor of column family to be added */ - public 
@org.apache.thrift.annotation.Nullable java.util.List splitKeys; // required + public @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * the tablename to check + * the tablename to add column family to */ TABLE_NAME((short)1, "tableName"), /** - * keys to check if the table has been created with all split keys + * column family descriptor of column family to be added */ - SPLIT_KEYS((short)2, "splitKeys"); + COLUMN((short)2, "column"); private static final java.util.Map byName = new java.util.HashMap(); @@ -43188,8 +43008,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE_NAME return TABLE_NAME; - case 2: // SPLIT_KEYS - return SPLIT_KEYS; + case 2: // COLUMN + return COLUMN; default: return null; } @@ -43236,50 +43056,48 @@ public java.lang.String getFieldName() { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))); - tmpMap.put(_Fields.SPLIT_KEYS, new org.apache.thrift.meta_data.FieldMetaData("splitKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); + tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailableWithSplit_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_args.class, metaDataMap); } - public isTableAvailableWithSplit_args() { + public addColumnFamily_args() { } - public isTableAvailableWithSplit_args( + public addColumnFamily_args( TTableName tableName, - java.util.List splitKeys) + TColumnFamilyDescriptor column) { this(); this.tableName = tableName; - this.splitKeys = splitKeys; + this.column = column; } /** * Performs a deep copy on other. 
*/ - public isTableAvailableWithSplit_args(isTableAvailableWithSplit_args other) { + public addColumnFamily_args(addColumnFamily_args other) { if (other.isSetTableName()) { this.tableName = new TTableName(other.tableName); } - if (other.isSetSplitKeys()) { - java.util.List __this__splitKeys = new java.util.ArrayList(other.splitKeys); - this.splitKeys = __this__splitKeys; + if (other.isSetColumn()) { + this.column = new TColumnFamilyDescriptor(other.column); } } - public isTableAvailableWithSplit_args deepCopy() { - return new isTableAvailableWithSplit_args(this); + public addColumnFamily_args deepCopy() { + return new addColumnFamily_args(this); } @Override public void clear() { this.tableName = null; - this.splitKeys = null; + this.column = null; } /** - * the tablename to check + * the tablename to add column family to */ @org.apache.thrift.annotation.Nullable public TTableName getTableName() { @@ -43287,9 +43105,9 @@ public TTableName getTableName() { } /** - * the tablename to check + * the tablename to add column family to */ - public isTableAvailableWithSplit_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { + public addColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { this.tableName = tableName; return this; } @@ -43309,50 +43127,34 @@ public void setTableNameIsSet(boolean value) { } } - public int getSplitKeysSize() { - return (this.splitKeys == null) ? 0 : this.splitKeys.size(); - } - - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getSplitKeysIterator() { - return (this.splitKeys == null) ? null : this.splitKeys.iterator(); - } - - public void addToSplitKeys(java.nio.ByteBuffer elem) { - if (this.splitKeys == null) { - this.splitKeys = new java.util.ArrayList(); - } - this.splitKeys.add(elem); - } - /** - * keys to check if the table has been created with all split keys + * column family descriptor of column family to be added */ @org.apache.thrift.annotation.Nullable - public java.util.List getSplitKeys() { - return this.splitKeys; + public TColumnFamilyDescriptor getColumn() { + return this.column; } /** - * keys to check if the table has been created with all split keys + * column family descriptor of column family to be added */ - public isTableAvailableWithSplit_args setSplitKeys(@org.apache.thrift.annotation.Nullable java.util.List splitKeys) { - this.splitKeys = splitKeys; + public addColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) { + this.column = column; return this; } - public void unsetSplitKeys() { - this.splitKeys = null; + public void unsetColumn() { + this.column = null; } - /** Returns true if field splitKeys is set (has been assigned a value) and false otherwise */ - public boolean isSetSplitKeys() { - return this.splitKeys != null; + /** Returns true if field column is set (has been assigned a value) and false otherwise */ + public boolean isSetColumn() { + return this.column != null; } - public void setSplitKeysIsSet(boolean value) { + public void setColumnIsSet(boolean value) { if (!value) { - this.splitKeys = null; + this.column = null; } } @@ -43366,11 +43168,11 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; - case SPLIT_KEYS: + case COLUMN: if (value == null) { - unsetSplitKeys(); + unsetColumn(); } else { - setSplitKeys((java.util.List)value); + setColumn((TColumnFamilyDescriptor)value); } break; @@ -43383,8 +43185,8 @@ public java.lang.Object getFieldValue(_Fields 
field) { case TABLE_NAME: return getTableName(); - case SPLIT_KEYS: - return getSplitKeys(); + case COLUMN: + return getColumn(); } throw new java.lang.IllegalStateException(); @@ -43399,20 +43201,20 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE_NAME: return isSetTableName(); - case SPLIT_KEYS: - return isSetSplitKeys(); + case COLUMN: + return isSetColumn(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof isTableAvailableWithSplit_args) - return this.equals((isTableAvailableWithSplit_args)that); + if (that instanceof addColumnFamily_args) + return this.equals((addColumnFamily_args)that); return false; } - public boolean equals(isTableAvailableWithSplit_args that) { + public boolean equals(addColumnFamily_args that) { if (that == null) return false; if (this == that) @@ -43427,12 +43229,12 @@ public boolean equals(isTableAvailableWithSplit_args that) { return false; } - boolean this_present_splitKeys = true && this.isSetSplitKeys(); - boolean that_present_splitKeys = true && that.isSetSplitKeys(); - if (this_present_splitKeys || that_present_splitKeys) { - if (!(this_present_splitKeys && that_present_splitKeys)) + boolean this_present_column = true && this.isSetColumn(); + boolean that_present_column = true && that.isSetColumn(); + if (this_present_column || that_present_column) { + if (!(this_present_column && that_present_column)) return false; - if (!this.splitKeys.equals(that.splitKeys)) + if (!this.column.equals(that.column)) return false; } @@ -43447,15 +43249,15 @@ public int hashCode() { if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode(); - hashCode = hashCode * 8191 + ((isSetSplitKeys()) ? 131071 : 524287); - if (isSetSplitKeys()) - hashCode = hashCode * 8191 + splitKeys.hashCode(); + hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287); + if (isSetColumn()) + hashCode = hashCode * 8191 + column.hashCode(); return hashCode; } @Override - public int compareTo(isTableAvailableWithSplit_args other) { + public int compareTo(addColumnFamily_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -43472,12 +43274,12 @@ public int compareTo(isTableAvailableWithSplit_args other) { return lastComparison; } } - lastComparison = java.lang.Boolean.compare(isSetSplitKeys(), other.isSetSplitKeys()); + lastComparison = java.lang.Boolean.compare(isSetColumn(), other.isSetColumn()); if (lastComparison != 0) { return lastComparison; } - if (isSetSplitKeys()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.splitKeys, other.splitKeys); + if (isSetColumn()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); if (lastComparison != 0) { return lastComparison; } @@ -43500,7 +43302,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("isTableAvailableWithSplit_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("addColumnFamily_args("); boolean first = true; sb.append("tableName:"); @@ -43511,11 +43313,11 @@ public java.lang.String toString() { } first = false; if (!first) sb.append(", "); - sb.append("splitKeys:"); - if (this.splitKeys == null) { + sb.append("column:"); + if (this.column == null) { sb.append("null"); } else { - org.apache.thrift.TBaseHelper.toString(this.splitKeys, sb); + sb.append(this.column); } first = false; sb.append(")"); @@ -43527,10 +43329,16 @@ public void validate() throws org.apache.thrift.TException { if (tableName == null) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString()); } + if (column == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString()); + } // check for sub-struct validity if (tableName != null) { tableName.validate(); } + if (column != null) { + column.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -43549,15 +43357,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class isTableAvailableWithSplit_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public isTableAvailableWithSplit_argsStandardScheme getScheme() { - return new isTableAvailableWithSplit_argsStandardScheme(); + private static class addColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public addColumnFamily_argsStandardScheme getScheme() { + return new addColumnFamily_argsStandardScheme(); } } - private static class isTableAvailableWithSplit_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class addColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -43576,20 +43384,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // SPLIT_KEYS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list326 = iprot.readListBegin(); - struct.splitKeys = new java.util.ArrayList(_list326.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem327; - for (int _i328 = 0; _i328 < _list326.size; ++_i328) - { - _elem327 = iprot.readBinary(); - struct.splitKeys.add(_elem327); - } - iprot.readListEnd(); - } - struct.setSplitKeysIsSet(true); + case 2: // COLUMN + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.column = new TColumnFamilyDescriptor(); + struct.column.read(iprot); + struct.setColumnIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -43605,7 +43404,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -43614,16 +43413,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi struct.tableName.write(oprot); oprot.writeFieldEnd(); } - if (struct.splitKeys != null) { - oprot.writeFieldBegin(SPLIT_KEYS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.splitKeys.size())); - for (java.nio.ByteBuffer _iter329 : struct.splitKeys) - { - oprot.writeBinary(_iter329); - } - oprot.writeListEnd(); - } + if (struct.column != null) { + oprot.writeFieldBegin(COLUMN_FIELD_DESC); + struct.column.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -43632,54 +43424,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi } - private static class isTableAvailableWithSplit_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public isTableAvailableWithSplit_argsTupleScheme getScheme() { - return new isTableAvailableWithSplit_argsTupleScheme(); + private static class addColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public addColumnFamily_argsTupleScheme getScheme() { + return new addColumnFamily_argsTupleScheme(); } } - private static class isTableAvailableWithSplit_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class addColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName.write(oprot); - java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetSplitKeys()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSplitKeys()) { - { - oprot.writeI32(struct.splitKeys.size()); - for (java.nio.ByteBuffer _iter330 : struct.splitKeys) - { - oprot.writeBinary(_iter330); - } - } - } + struct.column.write(oprot); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName = new TTableName(); struct.tableName.read(iprot); struct.setTableNameIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list331 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.splitKeys = new java.util.ArrayList(_list331.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem332; - for (int _i333 = 0; _i333 < _list331.size; ++_i333) - { - _elem332 = iprot.readBinary(); - struct.splitKeys.add(_elem332); - } - } - struct.setSplitKeysIsSet(true); - } + struct.column = new TColumnFamilyDescriptor(); + 
struct.column.read(iprot); + struct.setColumnIsSet(true); } } @@ -43688,21 +43456,18 @@ private static S scheme(org.apache. } } - public static class isTableAvailableWithSplit_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_result"); + public static class addColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailableWithSplit_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailableWithSplit_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_resultTupleSchemeFactory(); - public boolean success; // required public @org.apache.thrift.annotation.Nullable TIOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), IO((short)1, "io"); private static final java.util.Map byName = new java.util.HashMap(); @@ -43719,8 +43484,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; case 1: // IO return IO; default: @@ -43764,83 +43527,49 @@ public java.lang.String getFieldName() { } // isset id assignments - private static final int __SUCCESS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailableWithSplit_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_result.class, metaDataMap); } - public isTableAvailableWithSplit_result() { + public addColumnFamily_result() { } - public isTableAvailableWithSplit_result( - boolean success, + public addColumnFamily_result( TIOError io) { this(); - this.success = success; - setSuccessIsSet(true); this.io = io; } /** * Performs a deep copy on other. 
*/ - public isTableAvailableWithSplit_result(isTableAvailableWithSplit_result other) { - __isset_bitfield = other.__isset_bitfield; - this.success = other.success; + public addColumnFamily_result(addColumnFamily_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public isTableAvailableWithSplit_result deepCopy() { - return new isTableAvailableWithSplit_result(this); + public addColumnFamily_result deepCopy() { + return new addColumnFamily_result(this); } @Override public void clear() { - setSuccessIsSet(false); - this.success = false; this.io = null; } - public boolean isSuccess() { - return this.success; - } - - public isTableAvailableWithSplit_result setSuccess(boolean success) { - this.success = success; - setSuccessIsSet(true); - return this; - } - - public void unsetSuccess() { - __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); - } - - public void setSuccessIsSet(boolean value) { - __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); - } - @org.apache.thrift.annotation.Nullable public TIOError getIo() { return this.io; } - public isTableAvailableWithSplit_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public addColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -43862,14 +43591,6 @@ public void setIoIsSet(boolean value) { public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((java.lang.Boolean)value); - } - break; - case IO: if (value == null) { unsetIo(); @@ -43884,9 +43605,6 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return isSuccess(); - case IO: return getIo(); @@ -43901,8 +43619,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); case IO: return isSetIo(); } @@ -43911,26 +43627,17 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof isTableAvailableWithSplit_result) - return this.equals((isTableAvailableWithSplit_result)that); + if (that instanceof addColumnFamily_result) + return this.equals((addColumnFamily_result)that); return false; } - public boolean equals(isTableAvailableWithSplit_result that) { + public boolean equals(addColumnFamily_result that) { if (that == null) return false; if (this == that) return true; - boolean this_present_success = true; - boolean that_present_success = true; - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (this.success != that.success) - return false; - } - boolean this_present_io = true && this.isSetIo(); boolean that_present_io = true && that.isSetIo(); if (this_present_io || that_present_io) { @@ -43947,8 +43654,6 @@ public boolean equals(isTableAvailableWithSplit_result that) { public int hashCode() { int hashCode = 1; - hashCode = hashCode * 8191 + ((success) ? 
131071 : 524287); - hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287); if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode(); @@ -43957,23 +43662,13 @@ public int hashCode() { } @Override - public int compareTo(isTableAvailableWithSplit_result other) { + public int compareTo(addColumnFamily_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } lastComparison = java.lang.Boolean.compare(isSetIo(), other.isSetIo()); if (lastComparison != 0) { return lastComparison; @@ -44002,13 +43697,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("isTableAvailableWithSplit_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("addColumnFamily_result("); boolean first = true; - sb.append("success:"); - sb.append(this.success); - first = false; - if (!first) sb.append(", "); sb.append("io:"); if (this.io == null) { sb.append("null"); @@ -44035,23 +43726,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class isTableAvailableWithSplit_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public isTableAvailableWithSplit_resultStandardScheme getScheme() { - return new isTableAvailableWithSplit_resultStandardScheme(); + private static class addColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public addColumnFamily_resultStandardScheme getScheme() { + return new addColumnFamily_resultStandardScheme(); } } - private static class isTableAvailableWithSplit_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class addColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -44061,14 +43750,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; case 1: // IO if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { struct.io = new TIOError(); @@ -44089,15 +43770,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); - oprot.writeFieldEnd(); - } if (struct.io != null) { oprot.writeFieldBegin(IO_FIELD_DESC); struct.io.write(oprot); @@ -44109,42 +43785,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi } - private static class isTableAvailableWithSplit_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public isTableAvailableWithSplit_resultTupleScheme getScheme() { - return new isTableAvailableWithSplit_resultTupleScheme(); + private static class addColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public addColumnFamily_resultTupleScheme getScheme() { + return new addColumnFamily_resultTupleScheme(); } } - private static class isTableAvailableWithSplit_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class addColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } if (struct.isSetIo()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + optionals.set(0); } + oprot.writeBitSet(optionals, 1); if (struct.isSetIo()) { struct.io.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = iprot.readBool(); - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { struct.io = new TIOError(); struct.io.read(iprot); struct.setIoIsSet(true); @@ -44157,32 +43823,32 @@ private static S scheme(org.apache. 
} } - public static class addColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_args"); + public static class deleteColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_args"); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_argsTupleSchemeFactory(); /** - * the tablename to add column family to + * the tablename to delete column family from */ public @org.apache.thrift.annotation.Nullable TTableName tableName; // required /** - * column family descriptor of column family to be added + * name of column family to be deleted */ - public @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column; // required + public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * the tablename to add column family to + * the tablename to delete column family from */ TABLE_NAME((short)1, "tableName"), /** - * column family descriptor of column family to be added + * name of column family to be deleted */ COLUMN((short)2, "column"); @@ -44251,37 +43917,37 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))); tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_args.class, metaDataMap); } - public addColumnFamily_args() { + public deleteColumnFamily_args() { } - public addColumnFamily_args( + public deleteColumnFamily_args( TTableName tableName, - TColumnFamilyDescriptor column) + java.nio.ByteBuffer column) { this(); this.tableName = tableName; - this.column = column; + this.column = org.apache.thrift.TBaseHelper.copyBinary(column); } /** * Performs a deep copy on other. */ - public addColumnFamily_args(addColumnFamily_args other) { + public deleteColumnFamily_args(deleteColumnFamily_args other) { if (other.isSetTableName()) { this.tableName = new TTableName(other.tableName); } if (other.isSetColumn()) { - this.column = new TColumnFamilyDescriptor(other.column); + this.column = org.apache.thrift.TBaseHelper.copyBinary(other.column); } } - public addColumnFamily_args deepCopy() { - return new addColumnFamily_args(this); + public deleteColumnFamily_args deepCopy() { + return new deleteColumnFamily_args(this); } @Override @@ -44291,7 +43957,7 @@ public void clear() { } /** - * the tablename to add column family to + * the tablename to delete column family from */ @org.apache.thrift.annotation.Nullable public TTableName getTableName() { @@ -44299,9 +43965,9 @@ public TTableName getTableName() { } /** - * the tablename to add column family to + * the tablename to delete column family from */ - public addColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { + public deleteColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { this.tableName = tableName; return this; } @@ -44322,18 +43988,27 @@ public void setTableNameIsSet(boolean value) { } /** - * column family descriptor of column family to be added + * name of column family to be deleted */ - @org.apache.thrift.annotation.Nullable - public TColumnFamilyDescriptor getColumn() { - return this.column; + public byte[] getColumn() { + setColumn(org.apache.thrift.TBaseHelper.rightSize(column)); + return column == null ? 
null : column.array(); + } + + public java.nio.ByteBuffer bufferForColumn() { + return org.apache.thrift.TBaseHelper.copyBinary(column); } /** - * column family descriptor of column family to be added + * name of column family to be deleted */ - public addColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) { - this.column = column; + public deleteColumnFamily_args setColumn(byte[] column) { + this.column = column == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(column.clone()); + return this; + } + + public deleteColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) { + this.column = org.apache.thrift.TBaseHelper.copyBinary(column); return this; } @@ -44366,7 +44041,11 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetColumn(); } else { - setColumn((TColumnFamilyDescriptor)value); + if (value instanceof byte[]) { + setColumn((byte[])value); + } else { + setColumn((java.nio.ByteBuffer)value); + } } break; @@ -44403,12 +44082,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof addColumnFamily_args) - return this.equals((addColumnFamily_args)that); + if (that instanceof deleteColumnFamily_args) + return this.equals((deleteColumnFamily_args)that); return false; } - public boolean equals(addColumnFamily_args that) { + public boolean equals(deleteColumnFamily_args that) { if (that == null) return false; if (this == that) @@ -44451,7 +44130,7 @@ public int hashCode() { } @Override - public int compareTo(addColumnFamily_args other) { + public int compareTo(deleteColumnFamily_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -44496,7 +44175,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("addColumnFamily_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteColumnFamily_args("); boolean first = true; sb.append("tableName:"); @@ -44511,7 +44190,7 @@ public java.lang.String toString() { if (this.column == null) { sb.append("null"); } else { - sb.append(this.column); + org.apache.thrift.TBaseHelper.toString(this.column, sb); } first = false; sb.append(")"); @@ -44530,9 +44209,6 @@ public void validate() throws org.apache.thrift.TException { if (tableName != null) { tableName.validate(); } - if (column != null) { - column.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -44551,15 +44227,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class addColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public addColumnFamily_argsStandardScheme getScheme() { - return new addColumnFamily_argsStandardScheme(); + private static class deleteColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteColumnFamily_argsStandardScheme getScheme() { + return new deleteColumnFamily_argsStandardScheme(); } } - private static class addColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class deleteColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -44579,9 +44255,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_arg } break; case 2: // COLUMN - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.column = new TColumnFamilyDescriptor(); - struct.column.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.column = iprot.readBinary(); struct.setColumnIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -44598,7 +44273,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -44609,7 +44284,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_ar } if (struct.column != null) { oprot.writeFieldBegin(COLUMN_FIELD_DESC); - struct.column.write(oprot); + oprot.writeBinary(struct.column); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -44618,29 +44293,28 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_ar } - private static class addColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public addColumnFamily_argsTupleScheme getScheme() { - return new addColumnFamily_argsTupleScheme(); + private static class deleteColumnFamily_argsTupleSchemeFactory implements 
org.apache.thrift.scheme.SchemeFactory { + public deleteColumnFamily_argsTupleScheme getScheme() { + return new deleteColumnFamily_argsTupleScheme(); } } - private static class addColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class deleteColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName.write(oprot); - struct.column.write(oprot); + oprot.writeBinary(struct.column); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName = new TTableName(); struct.tableName.read(iprot); struct.setTableNameIsSet(true); - struct.column = new TColumnFamilyDescriptor(); - struct.column.read(iprot); + struct.column = iprot.readBinary(); struct.setColumnIsSet(true); } } @@ -44650,13 +44324,13 @@ private static S scheme(org.apache. } } - public static class addColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_result"); + public static class deleteColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -44727,13 +44401,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_result.class, metaDataMap); } - public addColumnFamily_result() { + public deleteColumnFamily_result() { } - public addColumnFamily_result( + public 
deleteColumnFamily_result( TIOError io) { this(); @@ -44743,14 +44417,14 @@ public addColumnFamily_result( /** * Performs a deep copy on other. */ - public addColumnFamily_result(addColumnFamily_result other) { + public deleteColumnFamily_result(deleteColumnFamily_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public addColumnFamily_result deepCopy() { - return new addColumnFamily_result(this); + public deleteColumnFamily_result deepCopy() { + return new deleteColumnFamily_result(this); } @Override @@ -44763,7 +44437,7 @@ public TIOError getIo() { return this.io; } - public addColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public deleteColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -44821,12 +44495,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof addColumnFamily_result) - return this.equals((addColumnFamily_result)that); + if (that instanceof deleteColumnFamily_result) + return this.equals((deleteColumnFamily_result)that); return false; } - public boolean equals(addColumnFamily_result that) { + public boolean equals(deleteColumnFamily_result that) { if (that == null) return false; if (this == that) @@ -44856,7 +44530,7 @@ public int hashCode() { } @Override - public int compareTo(addColumnFamily_result other) { + public int compareTo(deleteColumnFamily_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -44891,7 +44565,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("addColumnFamily_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteColumnFamily_result("); boolean first = true; sb.append("io:"); @@ -44926,15 +44600,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class addColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public addColumnFamily_resultStandardScheme getScheme() { - return new addColumnFamily_resultStandardScheme(); + private static class deleteColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteColumnFamily_resultStandardScheme getScheme() { + return new deleteColumnFamily_resultStandardScheme(); } } - private static class addColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class deleteColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -44964,7 +44638,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); @@ -44979,16 +44653,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_re } - private static class addColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public addColumnFamily_resultTupleScheme getScheme() { - return new addColumnFamily_resultTupleScheme(); + private static class deleteColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteColumnFamily_resultTupleScheme getScheme() { + return new deleteColumnFamily_resultTupleScheme(); } } - private static class addColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class deleteColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -45001,7 +44675,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_res } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -45017,32 +44691,32 @@ private static S scheme(org.apache. 
} } - public static class deleteColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_args"); + public static class modifyColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_args"); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_argsTupleSchemeFactory(); /** - * the tablename to delete column family from + * the tablename to modify column family */ public @org.apache.thrift.annotation.Nullable TTableName tableName; // required /** - * name of column family to be deleted + * column family descriptor of column family to be modified */ - public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column; // required + public @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * the tablename to delete column family from + * the tablename to modify column family */ TABLE_NAME((short)1, "tableName"), /** - * name of column family to be deleted + * column family descriptor of column family to be modified */ COLUMN((short)2, "column"); @@ -45111,37 +44785,37 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))); tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_args.class, metaDataMap); } - public deleteColumnFamily_args() { + public modifyColumnFamily_args() { } - public deleteColumnFamily_args( + public modifyColumnFamily_args( TTableName tableName, - java.nio.ByteBuffer column) + TColumnFamilyDescriptor column) { this(); this.tableName = tableName; - this.column = org.apache.thrift.TBaseHelper.copyBinary(column); + this.column = column; } /** * Performs a deep copy on other. */ - public deleteColumnFamily_args(deleteColumnFamily_args other) { + public modifyColumnFamily_args(modifyColumnFamily_args other) { if (other.isSetTableName()) { this.tableName = new TTableName(other.tableName); } if (other.isSetColumn()) { - this.column = org.apache.thrift.TBaseHelper.copyBinary(other.column); + this.column = new TColumnFamilyDescriptor(other.column); } } - public deleteColumnFamily_args deepCopy() { - return new deleteColumnFamily_args(this); + public modifyColumnFamily_args deepCopy() { + return new modifyColumnFamily_args(this); } @Override @@ -45151,7 +44825,7 @@ public void clear() { } /** - * the tablename to delete column family from + * the tablename to modify column family */ @org.apache.thrift.annotation.Nullable public TTableName getTableName() { @@ -45159,9 +44833,9 @@ public TTableName getTableName() { } /** - * the tablename to delete column family from + * the tablename to modify column family */ - public deleteColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { + public modifyColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { this.tableName = tableName; return this; } @@ -45182,27 +44856,18 @@ public void setTableNameIsSet(boolean value) { } /** - * name of column family to be deleted + * column family descriptor of column family to be modified */ - public byte[] getColumn() { - setColumn(org.apache.thrift.TBaseHelper.rightSize(column)); - return column == null ? 
null : column.array(); - } - - public java.nio.ByteBuffer bufferForColumn() { - return org.apache.thrift.TBaseHelper.copyBinary(column); + @org.apache.thrift.annotation.Nullable + public TColumnFamilyDescriptor getColumn() { + return this.column; } /** - * name of column family to be deleted + * column family descriptor of column family to be modified */ - public deleteColumnFamily_args setColumn(byte[] column) { - this.column = column == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(column.clone()); - return this; - } - - public deleteColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) { - this.column = org.apache.thrift.TBaseHelper.copyBinary(column); + public modifyColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) { + this.column = column; return this; } @@ -45235,11 +44900,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetColumn(); } else { - if (value instanceof byte[]) { - setColumn((byte[])value); - } else { - setColumn((java.nio.ByteBuffer)value); - } + setColumn((TColumnFamilyDescriptor)value); } break; @@ -45276,12 +44937,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof deleteColumnFamily_args) - return this.equals((deleteColumnFamily_args)that); + if (that instanceof modifyColumnFamily_args) + return this.equals((modifyColumnFamily_args)that); return false; } - public boolean equals(deleteColumnFamily_args that) { + public boolean equals(modifyColumnFamily_args that) { if (that == null) return false; if (this == that) @@ -45324,7 +44985,7 @@ public int hashCode() { } @Override - public int compareTo(deleteColumnFamily_args other) { + public int compareTo(modifyColumnFamily_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -45369,7 +45030,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteColumnFamily_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyColumnFamily_args("); boolean first = true; sb.append("tableName:"); @@ -45384,7 +45045,7 @@ public java.lang.String toString() { if (this.column == null) { sb.append("null"); } else { - org.apache.thrift.TBaseHelper.toString(this.column, sb); + sb.append(this.column); } first = false; sb.append(")"); @@ -45403,6 +45064,9 @@ public void validate() throws org.apache.thrift.TException { if (tableName != null) { tableName.validate(); } + if (column != null) { + column.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -45421,15 +45085,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class deleteColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteColumnFamily_argsStandardScheme getScheme() { - return new deleteColumnFamily_argsStandardScheme(); + private static class modifyColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyColumnFamily_argsStandardScheme getScheme() { + return new modifyColumnFamily_argsStandardScheme(); } } - private static class deleteColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -45449,8 +45113,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_ } break; case 2: // COLUMN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.column = iprot.readBinary(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.column = new TColumnFamilyDescriptor(); + struct.column.read(iprot); struct.setColumnIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -45467,7 +45132,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -45478,7 +45143,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily } if (struct.column != null) { oprot.writeFieldBegin(COLUMN_FIELD_DESC); - oprot.writeBinary(struct.column); + struct.column.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -45487,28 +45152,29 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily } - private static class deleteColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteColumnFamily_argsTupleScheme getScheme() { - return new deleteColumnFamily_argsTupleScheme(); + private static class 
modifyColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyColumnFamily_argsTupleScheme getScheme() { + return new modifyColumnFamily_argsTupleScheme(); } } - private static class deleteColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName.write(oprot); - oprot.writeBinary(struct.column); + struct.column.write(oprot); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.tableName = new TTableName(); struct.tableName.read(iprot); struct.setTableNameIsSet(true); - struct.column = iprot.readBinary(); + struct.column = new TColumnFamilyDescriptor(); + struct.column.read(iprot); struct.setColumnIsSet(true); } } @@ -45518,13 +45184,13 @@ private static S scheme(org.apache. } } - public static class deleteColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_result"); + public static class modifyColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -45595,13 +45261,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_result.class, metaDataMap); } - public deleteColumnFamily_result() { + public 
modifyColumnFamily_result() { } - public deleteColumnFamily_result( + public modifyColumnFamily_result( TIOError io) { this(); @@ -45611,14 +45277,14 @@ public deleteColumnFamily_result( /** * Performs a deep copy on other. */ - public deleteColumnFamily_result(deleteColumnFamily_result other) { + public modifyColumnFamily_result(modifyColumnFamily_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public deleteColumnFamily_result deepCopy() { - return new deleteColumnFamily_result(this); + public modifyColumnFamily_result deepCopy() { + return new modifyColumnFamily_result(this); } @Override @@ -45631,7 +45297,7 @@ public TIOError getIo() { return this.io; } - public deleteColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public modifyColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -45689,12 +45355,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof deleteColumnFamily_result) - return this.equals((deleteColumnFamily_result)that); + if (that instanceof modifyColumnFamily_result) + return this.equals((modifyColumnFamily_result)that); return false; } - public boolean equals(deleteColumnFamily_result that) { + public boolean equals(modifyColumnFamily_result that) { if (that == null) return false; if (this == that) @@ -45724,7 +45390,7 @@ public int hashCode() { } @Override - public int compareTo(deleteColumnFamily_result other) { + public int compareTo(modifyColumnFamily_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -45759,7 +45425,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteColumnFamily_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyColumnFamily_result("); boolean first = true; sb.append("io:"); @@ -45794,15 +45460,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class deleteColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteColumnFamily_resultStandardScheme getScheme() { - return new deleteColumnFamily_resultStandardScheme(); + private static class modifyColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyColumnFamily_resultStandardScheme getScheme() { + return new modifyColumnFamily_resultStandardScheme(); } } - private static class deleteColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -45832,7 +45498,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -45847,16 +45513,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily } - private static class deleteColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteColumnFamily_resultTupleScheme getScheme() { - return new deleteColumnFamily_resultTupleScheme(); + private static class modifyColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyColumnFamily_resultTupleScheme getScheme() { + return new modifyColumnFamily_resultTupleScheme(); } } - private static class deleteColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -45869,7 +45535,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { 
org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -45885,34 +45551,25 @@ private static S scheme(org.apache. } } - public static class modifyColumnFamily_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_args"); + public static class modifyTable_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_args"); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_argsTupleSchemeFactory(); /** - * the tablename to modify column family - */ - public @org.apache.thrift.annotation.Nullable TTableName tableName; // required - /** - * column family descriptor of column family to be modified + * the descriptor of the table to modify */ - public @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column; // required + public @org.apache.thrift.annotation.Nullable TTableDescriptor desc; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * the tablename to modify column family - */ - TABLE_NAME((short)1, "tableName"), - /** - * column family descriptor of column family to be modified + * the descriptor of the table to modify */ - COLUMN((short)2, "column"); + DESC((short)1, "desc"); private static final java.util.Map byName = new java.util.HashMap(); @@ -45928,10 +45585,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_NAME - return TABLE_NAME; - case 2: // COLUMN - return COLUMN; + case 1: // DESC + return DESC; default: return null; } @@ -45976,125 +45631,78 @@ public java.lang.String getFieldName() { public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))); - tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class))); + tmpMap.put(_Fields.DESC, new org.apache.thrift.meta_data.FieldMetaData("desc", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_args.class, metaDataMap); } - public modifyColumnFamily_args() { + public modifyTable_args() { } - public modifyColumnFamily_args( - TTableName tableName, - TColumnFamilyDescriptor column) + public modifyTable_args( + TTableDescriptor desc) { this(); - this.tableName = tableName; - this.column = column; + this.desc = desc; } /** * Performs a deep copy on other. 
*/ - public modifyColumnFamily_args(modifyColumnFamily_args other) { - if (other.isSetTableName()) { - this.tableName = new TTableName(other.tableName); - } - if (other.isSetColumn()) { - this.column = new TColumnFamilyDescriptor(other.column); + public modifyTable_args(modifyTable_args other) { + if (other.isSetDesc()) { + this.desc = new TTableDescriptor(other.desc); } } - public modifyColumnFamily_args deepCopy() { - return new modifyColumnFamily_args(this); + public modifyTable_args deepCopy() { + return new modifyTable_args(this); } @Override public void clear() { - this.tableName = null; - this.column = null; - } - - /** - * the tablename to modify column family - */ - @org.apache.thrift.annotation.Nullable - public TTableName getTableName() { - return this.tableName; - } - - /** - * the tablename to modify column family - */ - public modifyColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) { - this.tableName = tableName; - return this; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } + this.desc = null; } /** - * column family descriptor of column family to be modified + * the descriptor of the table to modify */ @org.apache.thrift.annotation.Nullable - public TColumnFamilyDescriptor getColumn() { - return this.column; + public TTableDescriptor getDesc() { + return this.desc; } /** - * column family descriptor of column family to be modified + * the descriptor of the table to modify */ - public modifyColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) { - this.column = column; + public modifyTable_args setDesc(@org.apache.thrift.annotation.Nullable TTableDescriptor desc) { + this.desc = desc; return this; } - public void unsetColumn() { - this.column = null; + public void unsetDesc() { + this.desc = null; } - /** Returns true if field column is set (has been assigned a value) and false otherwise */ - public boolean isSetColumn() { - return this.column != null; + /** Returns true if field desc is set (has been assigned a value) and false otherwise */ + public boolean isSetDesc() { + return this.desc != null; } - public void setColumnIsSet(boolean value) { + public void setDescIsSet(boolean value) { if (!value) { - this.column = null; + this.desc = null; } } public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((TTableName)value); - } - break; - - case COLUMN: + case DESC: if (value == null) { - unsetColumn(); + unsetDesc(); } else { - setColumn((TColumnFamilyDescriptor)value); + setDesc((TTableDescriptor)value); } break; @@ -46104,11 +45712,8 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { - case TABLE_NAME: - return getTableName(); - - case COLUMN: - return getColumn(); + case DESC: + return getDesc(); } throw new java.lang.IllegalStateException(); @@ -46121,42 +45726,31 @@ public boolean isSet(_Fields field) { } switch (field) { - case TABLE_NAME: - return isSetTableName(); - case COLUMN: - return isSetColumn(); + 
case DESC: + return isSetDesc(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyColumnFamily_args) - return this.equals((modifyColumnFamily_args)that); + if (that instanceof modifyTable_args) + return this.equals((modifyTable_args)that); return false; } - public boolean equals(modifyColumnFamily_args that) { + public boolean equals(modifyTable_args that) { if (that == null) return false; if (this == that) return true; - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_column = true && this.isSetColumn(); - boolean that_present_column = true && that.isSetColumn(); - if (this_present_column || that_present_column) { - if (!(this_present_column && that_present_column)) + boolean this_present_desc = true && this.isSetDesc(); + boolean that_present_desc = true && that.isSetDesc(); + if (this_present_desc || that_present_desc) { + if (!(this_present_desc && that_present_desc)) return false; - if (!this.column.equals(that.column)) + if (!this.desc.equals(that.desc)) return false; } @@ -46167,41 +45761,27 @@ public boolean equals(modifyColumnFamily_args that) { public int hashCode() { int hashCode = 1; - hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287); - if (isSetTableName()) - hashCode = hashCode * 8191 + tableName.hashCode(); - - hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287); - if (isSetColumn()) - hashCode = hashCode * 8191 + column.hashCode(); + hashCode = hashCode * 8191 + ((isSetDesc()) ? 131071 : 524287); + if (isSetDesc()) + hashCode = hashCode * 8191 + desc.hashCode(); return hashCode; } @Override - public int compareTo(modifyColumnFamily_args other) { + public int compareTo(modifyTable_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = java.lang.Boolean.compare(isSetTableName(), other.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = java.lang.Boolean.compare(isSetColumn(), other.isSetColumn()); + lastComparison = java.lang.Boolean.compare(isSetDesc(), other.isSetDesc()); if (lastComparison != 0) { return lastComparison; } - if (isSetColumn()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); + if (isSetDesc()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.desc, other.desc); if (lastComparison != 0) { return lastComparison; } @@ -46224,22 +45804,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyColumnFamily_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyTable_args("); boolean first = true; - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - if (!first) sb.append(", "); - sb.append("column:"); - if (this.column == null) { + sb.append("desc:"); + if (this.desc == null) { sb.append("null"); } else { - sb.append(this.column); + sb.append(this.desc); } first = false; sb.append(")"); @@ -46248,18 +45820,12 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields - if (tableName == null) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString()); - } - if (column == null) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString()); + if (desc == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'desc' was not present! Struct: " + toString()); } // check for sub-struct validity - if (tableName != null) { - tableName.validate(); - } - if (column != null) { - column.validate(); + if (desc != null) { + desc.validate(); } } @@ -46279,15 +45845,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyColumnFamily_argsStandardScheme getScheme() { - return new modifyColumnFamily_argsStandardScheme(); + private static class modifyTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyTable_argsStandardScheme getScheme() { + return new modifyTable_argsStandardScheme(); } } - private static class modifyColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -46297,20 +45863,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_ break; } switch (schemeField.id) { - case 1: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.tableName = new TTableName(); - struct.tableName.read(iprot); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // COLUMN + case 1: // DESC if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.column = new TColumnFamilyDescriptor(); - struct.column.read(iprot); - struct.setColumnIsSet(true); + struct.desc = new TTableDescriptor(); + struct.desc.read(iprot); + struct.setDescIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -46326,18 +45883,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, 
modifyColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.tableName != null) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - struct.tableName.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.column != null) { - oprot.writeFieldBegin(COLUMN_FIELD_DESC); - struct.column.write(oprot); + if (struct.desc != null) { + oprot.writeFieldBegin(DESC_FIELD_DESC); + struct.desc.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -46346,30 +45898,26 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily } - private static class modifyColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyColumnFamily_argsTupleScheme getScheme() { - return new modifyColumnFamily_argsTupleScheme(); + private static class modifyTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyTable_argsTupleScheme getScheme() { + return new modifyTable_argsTupleScheme(); } } - private static class modifyColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.tableName.write(oprot); - struct.column.write(oprot); + struct.desc.write(oprot); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.tableName = new TTableName(); - struct.tableName.read(iprot); - struct.setTableNameIsSet(true); - struct.column = new TColumnFamilyDescriptor(); - struct.column.read(iprot); - struct.setColumnIsSet(true); + struct.desc = new TTableDescriptor(); + struct.desc.read(iprot); + struct.setDescIsSet(true); } } @@ -46378,13 +45926,13 @@ private static S scheme(org.apache. 
} } - public static class modifyColumnFamily_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_result"); + public static class modifyTable_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -46455,13 +46003,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_result.class, metaDataMap); } - public modifyColumnFamily_result() { + public modifyTable_result() { } - public modifyColumnFamily_result( + public modifyTable_result( TIOError io) { this(); @@ -46471,14 +46019,14 @@ public modifyColumnFamily_result( /** * Performs a deep copy on other. 
*/ - public modifyColumnFamily_result(modifyColumnFamily_result other) { + public modifyTable_result(modifyTable_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public modifyColumnFamily_result deepCopy() { - return new modifyColumnFamily_result(this); + public modifyTable_result deepCopy() { + return new modifyTable_result(this); } @Override @@ -46491,7 +46039,7 @@ public TIOError getIo() { return this.io; } - public modifyColumnFamily_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public modifyTable_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -46549,12 +46097,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyColumnFamily_result) - return this.equals((modifyColumnFamily_result)that); + if (that instanceof modifyTable_result) + return this.equals((modifyTable_result)that); return false; } - public boolean equals(modifyColumnFamily_result that) { + public boolean equals(modifyTable_result that) { if (that == null) return false; if (this == that) @@ -46584,7 +46132,7 @@ public int hashCode() { } @Override - public int compareTo(modifyColumnFamily_result other) { + public int compareTo(modifyTable_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -46619,7 +46167,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyColumnFamily_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyTable_result("); boolean first = true; sb.append("io:"); @@ -46654,15 +46202,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyColumnFamily_resultStandardScheme getScheme() { - return new modifyColumnFamily_resultStandardScheme(); + private static class modifyTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyTable_resultStandardScheme getScheme() { + return new modifyTable_resultStandardScheme(); } } - private static class modifyColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -46692,7 +46240,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -46707,16 +46255,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily } - private static class 
modifyColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyColumnFamily_resultTupleScheme getScheme() { - return new modifyColumnFamily_resultTupleScheme(); + private static class modifyTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyTable_resultTupleScheme getScheme() { + return new modifyTable_resultTupleScheme(); } } - private static class modifyColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -46729,7 +46277,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -46745,25 +46293,25 @@ private static S scheme(org.apache. } } - public static class modifyTable_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_args"); + public static class createNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_args"); - private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("namespaceDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_argsTupleSchemeFactory(); /** - * the descriptor of the table to modify + * descriptor which describes the new namespace */ - public @org.apache.thrift.annotation.Nullable TTableDescriptor desc; // required + public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * the descriptor of the table to modify + * descriptor which describes the new namespace */ - DESC((short)1, "desc"); + NAMESPACE_DESC((short)1, "namespaceDesc"); private static final java.util.Map byName = new java.util.HashMap(); @@ -46779,8 +46327,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DESC - return DESC; + case 1: // NAMESPACE_DESC + return NAMESPACE_DESC; default: return null; } @@ -46825,78 +46373,78 @@ public java.lang.String getFieldName() { public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DESC, new org.apache.thrift.meta_data.FieldMetaData("desc", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))); + tmpMap.put(_Fields.NAMESPACE_DESC, new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_args.class, metaDataMap); } - public modifyTable_args() { + public createNamespace_args() { } - public modifyTable_args( - TTableDescriptor desc) + public createNamespace_args( + TNamespaceDescriptor namespaceDesc) { this(); - this.desc = desc; + this.namespaceDesc = namespaceDesc; } /** * Performs a deep copy on other. 
*/ - public modifyTable_args(modifyTable_args other) { - if (other.isSetDesc()) { - this.desc = new TTableDescriptor(other.desc); + public createNamespace_args(createNamespace_args other) { + if (other.isSetNamespaceDesc()) { + this.namespaceDesc = new TNamespaceDescriptor(other.namespaceDesc); } } - public modifyTable_args deepCopy() { - return new modifyTable_args(this); + public createNamespace_args deepCopy() { + return new createNamespace_args(this); } @Override public void clear() { - this.desc = null; + this.namespaceDesc = null; } /** - * the descriptor of the table to modify + * descriptor which describes the new namespace */ @org.apache.thrift.annotation.Nullable - public TTableDescriptor getDesc() { - return this.desc; + public TNamespaceDescriptor getNamespaceDesc() { + return this.namespaceDesc; } /** - * the descriptor of the table to modify + * descriptor which describes the new namespace */ - public modifyTable_args setDesc(@org.apache.thrift.annotation.Nullable TTableDescriptor desc) { - this.desc = desc; + public createNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) { + this.namespaceDesc = namespaceDesc; return this; } - public void unsetDesc() { - this.desc = null; + public void unsetNamespaceDesc() { + this.namespaceDesc = null; } - /** Returns true if field desc is set (has been assigned a value) and false otherwise */ - public boolean isSetDesc() { - return this.desc != null; + /** Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise */ + public boolean isSetNamespaceDesc() { + return this.namespaceDesc != null; } - public void setDescIsSet(boolean value) { + public void setNamespaceDescIsSet(boolean value) { if (!value) { - this.desc = null; + this.namespaceDesc = null; } } public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { - case DESC: + case NAMESPACE_DESC: if (value == null) { - unsetDesc(); + unsetNamespaceDesc(); } else { - setDesc((TTableDescriptor)value); + setNamespaceDesc((TNamespaceDescriptor)value); } break; @@ -46906,8 +46454,8 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { - case DESC: - return getDesc(); + case NAMESPACE_DESC: + return getNamespaceDesc(); } throw new java.lang.IllegalStateException(); @@ -46920,31 +46468,31 @@ public boolean isSet(_Fields field) { } switch (field) { - case DESC: - return isSetDesc(); + case NAMESPACE_DESC: + return isSetNamespaceDesc(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyTable_args) - return this.equals((modifyTable_args)that); + if (that instanceof createNamespace_args) + return this.equals((createNamespace_args)that); return false; } - public boolean equals(modifyTable_args that) { + public boolean equals(createNamespace_args that) { if (that == null) return false; if (this == that) return true; - boolean this_present_desc = true && this.isSetDesc(); - boolean that_present_desc = true && that.isSetDesc(); - if (this_present_desc || that_present_desc) { - if (!(this_present_desc && that_present_desc)) + boolean this_present_namespaceDesc = true && this.isSetNamespaceDesc(); + boolean that_present_namespaceDesc = true && that.isSetNamespaceDesc(); + if (this_present_namespaceDesc || that_present_namespaceDesc) { 
+ if (!(this_present_namespaceDesc && that_present_namespaceDesc)) return false; - if (!this.desc.equals(that.desc)) + if (!this.namespaceDesc.equals(that.namespaceDesc)) return false; } @@ -46955,27 +46503,27 @@ public boolean equals(modifyTable_args that) { public int hashCode() { int hashCode = 1; - hashCode = hashCode * 8191 + ((isSetDesc()) ? 131071 : 524287); - if (isSetDesc()) - hashCode = hashCode * 8191 + desc.hashCode(); + hashCode = hashCode * 8191 + ((isSetNamespaceDesc()) ? 131071 : 524287); + if (isSetNamespaceDesc()) + hashCode = hashCode * 8191 + namespaceDesc.hashCode(); return hashCode; } @Override - public int compareTo(modifyTable_args other) { + public int compareTo(createNamespace_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = java.lang.Boolean.compare(isSetDesc(), other.isSetDesc()); + lastComparison = java.lang.Boolean.compare(isSetNamespaceDesc(), other.isSetNamespaceDesc()); if (lastComparison != 0) { return lastComparison; } - if (isSetDesc()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.desc, other.desc); + if (isSetNamespaceDesc()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc); if (lastComparison != 0) { return lastComparison; } @@ -46998,14 +46546,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyTable_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("createNamespace_args("); boolean first = true; - sb.append("desc:"); - if (this.desc == null) { + sb.append("namespaceDesc:"); + if (this.namespaceDesc == null) { sb.append("null"); } else { - sb.append(this.desc); + sb.append(this.namespaceDesc); } first = false; sb.append(")"); @@ -47014,12 +46562,12 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields - if (desc == null) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'desc' was not present! Struct: " + toString()); + if (namespaceDesc == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'namespaceDesc' was not present! 
Struct: " + toString()); } // check for sub-struct validity - if (desc != null) { - desc.validate(); + if (namespaceDesc != null) { + namespaceDesc.validate(); } } @@ -47039,15 +46587,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyTable_argsStandardScheme getScheme() { - return new modifyTable_argsStandardScheme(); + private static class createNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public createNamespace_argsStandardScheme getScheme() { + return new createNamespace_argsStandardScheme(); } } - private static class modifyTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class createNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -47057,11 +46605,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args st break; } switch (schemeField.id) { - case 1: // DESC + case 1: // NAMESPACE_DESC if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.desc = new TTableDescriptor(); - struct.desc.read(iprot); - struct.setDescIsSet(true); + struct.namespaceDesc = new TNamespaceDescriptor(); + struct.namespaceDesc.read(iprot); + struct.setNamespaceDescIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -47077,13 +46625,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.desc != null) { - oprot.writeFieldBegin(DESC_FIELD_DESC); - struct.desc.write(oprot); + if (struct.namespaceDesc != null) { + oprot.writeFieldBegin(NAMESPACE_DESC_FIELD_DESC); + struct.namespaceDesc.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -47092,26 +46640,26 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args s } - private static class modifyTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyTable_argsTupleScheme getScheme() { - return new modifyTable_argsTupleScheme(); + private static class createNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public createNamespace_argsTupleScheme getScheme() { + return new createNamespace_argsTupleScheme(); } } - private static class modifyTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class createNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException { 
org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.desc.write(oprot); + struct.namespaceDesc.write(oprot); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.desc = new TTableDescriptor(); - struct.desc.read(iprot); - struct.setDescIsSet(true); + struct.namespaceDesc = new TNamespaceDescriptor(); + struct.namespaceDesc.read(iprot); + struct.setNamespaceDescIsSet(true); } } @@ -47120,13 +46668,13 @@ private static S scheme(org.apache. } } - public static class modifyTable_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_result"); + public static class createNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -47197,13 +46745,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_result.class, metaDataMap); } - public modifyTable_result() { + public createNamespace_result() { } - public modifyTable_result( + public createNamespace_result( TIOError io) { this(); @@ -47213,14 +46761,14 @@ public modifyTable_result( /** * Performs a deep copy on other. 
*/ - public modifyTable_result(modifyTable_result other) { + public createNamespace_result(createNamespace_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public modifyTable_result deepCopy() { - return new modifyTable_result(this); + public createNamespace_result deepCopy() { + return new createNamespace_result(this); } @Override @@ -47233,7 +46781,7 @@ public TIOError getIo() { return this.io; } - public modifyTable_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public createNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -47291,12 +46839,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyTable_result) - return this.equals((modifyTable_result)that); + if (that instanceof createNamespace_result) + return this.equals((createNamespace_result)that); return false; } - public boolean equals(modifyTable_result that) { + public boolean equals(createNamespace_result that) { if (that == null) return false; if (this == that) @@ -47326,7 +46874,7 @@ public int hashCode() { } @Override - public int compareTo(modifyTable_result other) { + public int compareTo(createNamespace_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -47361,7 +46909,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyTable_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("createNamespace_result("); boolean first = true; sb.append("io:"); @@ -47396,15 +46944,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyTable_resultStandardScheme getScheme() { - return new modifyTable_resultStandardScheme(); + private static class createNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public createNamespace_resultStandardScheme getScheme() { + return new createNamespace_resultStandardScheme(); } } - private static class modifyTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class createNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -47434,7 +46982,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -47449,16 +46997,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result } - private static class modifyTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { 
- public modifyTable_resultTupleScheme getScheme() { - return new modifyTable_resultTupleScheme(); + private static class createNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public createNamespace_resultTupleScheme getScheme() { + return new createNamespace_resultTupleScheme(); } } - private static class modifyTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class createNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -47471,7 +47019,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -47487,13 +47035,13 @@ private static S scheme(org.apache. } } - public static class createNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_args"); + public static class modifyNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_args"); private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("namespaceDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_argsTupleSchemeFactory(); /** * descriptor which describes the new namespace @@ -47570,13 +47118,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.NAMESPACE_DESC, new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_args.class, metaDataMap); } - public createNamespace_args() { + public 
modifyNamespace_args() { } - public createNamespace_args( + public modifyNamespace_args( TNamespaceDescriptor namespaceDesc) { this(); @@ -47586,14 +47134,14 @@ public createNamespace_args( /** * Performs a deep copy on other. */ - public createNamespace_args(createNamespace_args other) { + public modifyNamespace_args(modifyNamespace_args other) { if (other.isSetNamespaceDesc()) { this.namespaceDesc = new TNamespaceDescriptor(other.namespaceDesc); } } - public createNamespace_args deepCopy() { - return new createNamespace_args(this); + public modifyNamespace_args deepCopy() { + return new modifyNamespace_args(this); } @Override @@ -47612,7 +47160,7 @@ public TNamespaceDescriptor getNamespaceDesc() { /** * descriptor which describes the new namespace */ - public createNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) { + public modifyNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) { this.namespaceDesc = namespaceDesc; return this; } @@ -47670,12 +47218,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof createNamespace_args) - return this.equals((createNamespace_args)that); + if (that instanceof modifyNamespace_args) + return this.equals((modifyNamespace_args)that); return false; } - public boolean equals(createNamespace_args that) { + public boolean equals(modifyNamespace_args that) { if (that == null) return false; if (this == that) @@ -47705,7 +47253,7 @@ public int hashCode() { } @Override - public int compareTo(createNamespace_args other) { + public int compareTo(modifyNamespace_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -47740,7 +47288,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("createNamespace_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyNamespace_args("); boolean first = true; sb.append("namespaceDesc:"); @@ -47781,15 +47329,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class createNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public createNamespace_argsStandardScheme getScheme() { - return new createNamespace_argsStandardScheme(); + private static class modifyNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyNamespace_argsStandardScheme getScheme() { + return new modifyNamespace_argsStandardScheme(); } } - private static class createNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -47819,7 +47367,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -47834,22 +47382,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_ar } - private static class createNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public createNamespace_argsTupleScheme getScheme() { - return new createNamespace_argsTupleScheme(); + private static class modifyNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyNamespace_argsTupleScheme getScheme() { + return new modifyNamespace_argsTupleScheme(); } } - private static class createNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.namespaceDesc.write(oprot); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.namespaceDesc = new TNamespaceDescriptor(); struct.namespaceDesc.read(iprot); @@ -47862,13 +47410,13 @@ private static S scheme(org.apache. 
} } - public static class createNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_result"); + public static class modifyNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -47939,13 +47487,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_result.class, metaDataMap); } - public createNamespace_result() { + public modifyNamespace_result() { } - public createNamespace_result( + public modifyNamespace_result( TIOError io) { this(); @@ -47955,14 +47503,14 @@ public createNamespace_result( /** * Performs a deep copy on other. 
*/ - public createNamespace_result(createNamespace_result other) { + public modifyNamespace_result(modifyNamespace_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public createNamespace_result deepCopy() { - return new createNamespace_result(this); + public modifyNamespace_result deepCopy() { + return new modifyNamespace_result(this); } @Override @@ -47975,7 +47523,7 @@ public TIOError getIo() { return this.io; } - public createNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public modifyNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -48033,12 +47581,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof createNamespace_result) - return this.equals((createNamespace_result)that); + if (that instanceof modifyNamespace_result) + return this.equals((modifyNamespace_result)that); return false; } - public boolean equals(createNamespace_result that) { + public boolean equals(modifyNamespace_result that) { if (that == null) return false; if (this == that) @@ -48068,7 +47616,7 @@ public int hashCode() { } @Override - public int compareTo(createNamespace_result other) { + public int compareTo(modifyNamespace_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -48103,7 +47651,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("createNamespace_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyNamespace_result("); boolean first = true; sb.append("io:"); @@ -48138,15 +47686,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class createNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public createNamespace_resultStandardScheme getScheme() { - return new createNamespace_resultStandardScheme(); + private static class modifyNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyNamespace_resultStandardScheme getScheme() { + return new modifyNamespace_resultStandardScheme(); } } - private static class createNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class modifyNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -48176,7 +47724,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -48191,16 +47739,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_re } - private static class 
createNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public createNamespace_resultTupleScheme getScheme() { - return new createNamespace_resultTupleScheme(); + private static class modifyNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public modifyNamespace_resultTupleScheme getScheme() { + return new modifyNamespace_resultTupleScheme(); } } - private static class createNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class modifyNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -48213,7 +47761,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_res } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -48229,25 +47777,25 @@ private static S scheme(org.apache. } } - public static class modifyNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_args"); + public static class deleteNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_args"); - private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("namespaceDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_argsTupleSchemeFactory(); /** - * descriptor which describes the new namespace + * namespace name */ - public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc; // required + public @org.apache.thrift.annotation.Nullable java.lang.String name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * descriptor which describes the new namespace + * namespace name */ - NAMESPACE_DESC((short)1, "namespaceDesc"); + NAME((short)1, "name"); private static final java.util.Map byName = new java.util.HashMap(); @@ -48263,8 +47811,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // NAMESPACE_DESC - return NAMESPACE_DESC; + case 1: // NAME + return NAME; default: return null; } @@ -48309,78 +47857,78 @@ public java.lang.String getFieldName() { public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAMESPACE_DESC, new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_args.class, metaDataMap); } - public modifyNamespace_args() { + public deleteNamespace_args() { } - public modifyNamespace_args( - TNamespaceDescriptor namespaceDesc) + public deleteNamespace_args( + java.lang.String name) { this(); - this.namespaceDesc = namespaceDesc; + this.name = name; } /** * Performs a deep copy on other. 
*/ - public modifyNamespace_args(modifyNamespace_args other) { - if (other.isSetNamespaceDesc()) { - this.namespaceDesc = new TNamespaceDescriptor(other.namespaceDesc); + public deleteNamespace_args(deleteNamespace_args other) { + if (other.isSetName()) { + this.name = other.name; } } - public modifyNamespace_args deepCopy() { - return new modifyNamespace_args(this); + public deleteNamespace_args deepCopy() { + return new deleteNamespace_args(this); } @Override public void clear() { - this.namespaceDesc = null; + this.name = null; } /** - * descriptor which describes the new namespace + * namespace name */ @org.apache.thrift.annotation.Nullable - public TNamespaceDescriptor getNamespaceDesc() { - return this.namespaceDesc; + public java.lang.String getName() { + return this.name; } /** - * descriptor which describes the new namespace + * namespace name */ - public modifyNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) { - this.namespaceDesc = namespaceDesc; + public deleteNamespace_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) { + this.name = name; return this; } - public void unsetNamespaceDesc() { - this.namespaceDesc = null; + public void unsetName() { + this.name = null; } - /** Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise */ - public boolean isSetNamespaceDesc() { - return this.namespaceDesc != null; + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; } - public void setNamespaceDescIsSet(boolean value) { + public void setNameIsSet(boolean value) { if (!value) { - this.namespaceDesc = null; + this.name = null; } } public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { - case NAMESPACE_DESC: + case NAME: if (value == null) { - unsetNamespaceDesc(); + unsetName(); } else { - setNamespaceDesc((TNamespaceDescriptor)value); + setName((java.lang.String)value); } break; @@ -48390,8 +47938,8 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { - case NAMESPACE_DESC: - return getNamespaceDesc(); + case NAME: + return getName(); } throw new java.lang.IllegalStateException(); @@ -48404,31 +47952,31 @@ public boolean isSet(_Fields field) { } switch (field) { - case NAMESPACE_DESC: - return isSetNamespaceDesc(); + case NAME: + return isSetName(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyNamespace_args) - return this.equals((modifyNamespace_args)that); + if (that instanceof deleteNamespace_args) + return this.equals((deleteNamespace_args)that); return false; } - public boolean equals(modifyNamespace_args that) { + public boolean equals(deleteNamespace_args that) { if (that == null) return false; if (this == that) return true; - boolean this_present_namespaceDesc = true && this.isSetNamespaceDesc(); - boolean that_present_namespaceDesc = true && that.isSetNamespaceDesc(); - if (this_present_namespaceDesc || that_present_namespaceDesc) { - if (!(this_present_namespaceDesc && that_present_namespaceDesc)) + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if 
(!(this_present_name && that_present_name)) return false; - if (!this.namespaceDesc.equals(that.namespaceDesc)) + if (!this.name.equals(that.name)) return false; } @@ -48439,27 +47987,27 @@ public boolean equals(modifyNamespace_args that) { public int hashCode() { int hashCode = 1; - hashCode = hashCode * 8191 + ((isSetNamespaceDesc()) ? 131071 : 524287); - if (isSetNamespaceDesc()) - hashCode = hashCode * 8191 + namespaceDesc.hashCode(); + hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287); + if (isSetName()) + hashCode = hashCode * 8191 + name.hashCode(); return hashCode; } @Override - public int compareTo(modifyNamespace_args other) { + public int compareTo(deleteNamespace_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = java.lang.Boolean.compare(isSetNamespaceDesc(), other.isSetNamespaceDesc()); + lastComparison = java.lang.Boolean.compare(isSetName(), other.isSetName()); if (lastComparison != 0) { return lastComparison; } - if (isSetNamespaceDesc()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc); + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); if (lastComparison != 0) { return lastComparison; } @@ -48482,14 +48030,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyNamespace_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteNamespace_args("); boolean first = true; - sb.append("namespaceDesc:"); - if (this.namespaceDesc == null) { + sb.append("name:"); + if (this.name == null) { sb.append("null"); } else { - sb.append(this.namespaceDesc); + sb.append(this.name); } first = false; sb.append(")"); @@ -48498,13 +48046,10 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields - if (namespaceDesc == null) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'namespaceDesc' was not present! Struct: " + toString()); + if (name == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! 
Struct: " + toString()); } // check for sub-struct validity - if (namespaceDesc != null) { - namespaceDesc.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -48523,15 +48068,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyNamespace_argsStandardScheme getScheme() { - return new modifyNamespace_argsStandardScheme(); + private static class deleteNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteNamespace_argsStandardScheme getScheme() { + return new deleteNamespace_argsStandardScheme(); } } - private static class modifyNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class deleteNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -48541,11 +48086,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_arg break; } switch (schemeField.id) { - case 1: // NAMESPACE_DESC - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.namespaceDesc = new TNamespaceDescriptor(); - struct.namespaceDesc.read(iprot); - struct.setNamespaceDescIsSet(true); + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -48561,13 +48105,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.namespaceDesc != null) { - oprot.writeFieldBegin(NAMESPACE_DESC_FIELD_DESC); - struct.namespaceDesc.write(oprot); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -48576,26 +48120,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_ar } - private static class modifyNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyNamespace_argsTupleScheme getScheme() { - return new modifyNamespace_argsTupleScheme(); + private static class deleteNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteNamespace_argsTupleScheme getScheme() { + return new deleteNamespace_argsTupleScheme(); } } - private static class modifyNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class deleteNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.namespaceDesc.write(oprot); + oprot.writeString(struct.name); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.namespaceDesc = new TNamespaceDescriptor(); - struct.namespaceDesc.read(iprot); - struct.setNamespaceDescIsSet(true); + struct.name = iprot.readString(); + struct.setNameIsSet(true); } } @@ -48604,13 +48147,13 @@ private static S scheme(org.apache. } } - public static class modifyNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_result"); + public static class deleteNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_result"); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_resultTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable TIOError io; // required @@ -48681,13 +48224,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_result.class, metaDataMap); } - public modifyNamespace_result() { + public deleteNamespace_result() { } - public modifyNamespace_result( + public deleteNamespace_result( TIOError io) { this(); @@ -48697,14 +48240,14 @@ public modifyNamespace_result( /** * Performs a deep copy on other. 
*/ - public modifyNamespace_result(modifyNamespace_result other) { + public deleteNamespace_result(deleteNamespace_result other) { if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public modifyNamespace_result deepCopy() { - return new modifyNamespace_result(this); + public deleteNamespace_result deepCopy() { + return new deleteNamespace_result(this); } @Override @@ -48717,7 +48260,7 @@ public TIOError getIo() { return this.io; } - public modifyNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public deleteNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -48775,12 +48318,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof modifyNamespace_result) - return this.equals((modifyNamespace_result)that); + if (that instanceof deleteNamespace_result) + return this.equals((deleteNamespace_result)that); return false; } - public boolean equals(modifyNamespace_result that) { + public boolean equals(deleteNamespace_result that) { if (that == null) return false; if (this == that) @@ -48810,7 +48353,7 @@ public int hashCode() { } @Override - public int compareTo(modifyNamespace_result other) { + public int compareTo(deleteNamespace_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -48845,7 +48388,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("modifyNamespace_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteNamespace_result("); boolean first = true; sb.append("io:"); @@ -48880,15 +48423,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class modifyNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyNamespace_resultStandardScheme getScheme() { - return new modifyNamespace_resultStandardScheme(); + private static class deleteNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteNamespace_resultStandardScheme getScheme() { + return new deleteNamespace_resultStandardScheme(); } } - private static class modifyNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class deleteNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -48918,7 +48461,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -48933,16 +48476,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_re } - private static class 
modifyNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public modifyNamespace_resultTupleScheme getScheme() { - return new modifyNamespace_resultTupleScheme(); + private static class deleteNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public deleteNamespace_resultTupleScheme getScheme() { + return new deleteNamespace_resultTupleScheme(); } } - private static class modifyNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class deleteNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -48955,7 +48498,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_res } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -48971,23 +48514,23 @@ private static S scheme(org.apache. } } - public static class deleteNamespace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_args"); + public static class getNamespaceDescriptor_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_args"); private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_argsTupleSchemeFactory(); /** - * namespace name + * name of namespace descriptor */ public @org.apache.thrift.annotation.Nullable java.lang.String name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * namespace name + * name of namespace descriptor */ NAME((short)1, "name"); @@ -49054,13 +48597,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_args.class, metaDataMap); } - public deleteNamespace_args() { + public getNamespaceDescriptor_args() { } - public deleteNamespace_args( + public getNamespaceDescriptor_args( java.lang.String name) { this(); @@ -49070,14 +48613,14 @@ public deleteNamespace_args( /** * Performs a deep copy on other. */ - public deleteNamespace_args(deleteNamespace_args other) { + public getNamespaceDescriptor_args(getNamespaceDescriptor_args other) { if (other.isSetName()) { this.name = other.name; } } - public deleteNamespace_args deepCopy() { - return new deleteNamespace_args(this); + public getNamespaceDescriptor_args deepCopy() { + return new getNamespaceDescriptor_args(this); } @Override @@ -49086,7 +48629,7 @@ public void clear() { } /** - * namespace name + * name of namespace descriptor */ @org.apache.thrift.annotation.Nullable public java.lang.String getName() { @@ -49094,9 +48637,9 @@ public java.lang.String getName() { } /** - * namespace name + * name of namespace descriptor */ - public deleteNamespace_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) { + public getNamespaceDescriptor_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) { this.name = name; return this; } @@ -49154,12 +48697,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof deleteNamespace_args) - return this.equals((deleteNamespace_args)that); + if (that instanceof getNamespaceDescriptor_args) + return this.equals((getNamespaceDescriptor_args)that); return false; } - public boolean equals(deleteNamespace_args that) { + public boolean equals(getNamespaceDescriptor_args that) { if (that == null) return false; if (this == that) @@ -49189,7 +48732,7 @@ public int hashCode() { } @Override - public int compareTo(deleteNamespace_args other) { + public int compareTo(getNamespaceDescriptor_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -49224,7 +48767,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteNamespace_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getNamespaceDescriptor_args("); boolean first = true; sb.append("name:"); @@ -49262,15 +48805,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class deleteNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteNamespace_argsStandardScheme getScheme() { - return new deleteNamespace_argsStandardScheme(); + private static class getNamespaceDescriptor_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getNamespaceDescriptor_argsStandardScheme getScheme() { + return new getNamespaceDescriptor_argsStandardScheme(); } } - private static class deleteNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getNamespaceDescriptor_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -49299,7 +48842,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -49314,22 +48857,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_ar } - private static class deleteNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteNamespace_argsTupleScheme getScheme() { - return new deleteNamespace_argsTupleScheme(); + private static class getNamespaceDescriptor_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getNamespaceDescriptor_argsTupleScheme getScheme() { + return new getNamespaceDescriptor_argsTupleScheme(); } } - private static class deleteNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getNamespaceDescriptor_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; oprot.writeString(struct.name); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.name = iprot.readString(); struct.setNameIsSet(true); @@ -49341,18 +48884,21 @@ private static S 
scheme(org.apache. } } - public static class deleteNamespace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_result"); + public static class getNamespaceDescriptor_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_resultTupleSchemeFactory(); + public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor success; // required public @org.apache.thrift.annotation.Nullable TIOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), IO((short)1, "io"); private static final java.util.Map byName = new java.util.HashMap(); @@ -49369,6 +48915,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // IO return IO; default: @@ -49415,46 +48963,79 @@ public java.lang.String getFieldName() { public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_result.class, metaDataMap); } - public deleteNamespace_result() { + public getNamespaceDescriptor_result() { } - public deleteNamespace_result( + public getNamespaceDescriptor_result( + TNamespaceDescriptor success, TIOError io) { this(); + this.success = success; 
this.io = io; } /** * Performs a deep copy on other. */ - public deleteNamespace_result(deleteNamespace_result other) { + public getNamespaceDescriptor_result(getNamespaceDescriptor_result other) { + if (other.isSetSuccess()) { + this.success = new TNamespaceDescriptor(other.success); + } if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public deleteNamespace_result deepCopy() { - return new deleteNamespace_result(this); + public getNamespaceDescriptor_result deepCopy() { + return new getNamespaceDescriptor_result(this); } @Override public void clear() { + this.success = null; this.io = null; } + @org.apache.thrift.annotation.Nullable + public TNamespaceDescriptor getSuccess() { + return this.success; + } + + public getNamespaceDescriptor_result setSuccess(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor success) { + this.success = success; + return this; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + @org.apache.thrift.annotation.Nullable public TIOError getIo() { return this.io; } - public deleteNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public getNamespaceDescriptor_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -49476,6 +49057,14 @@ public void setIoIsSet(boolean value) { public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TNamespaceDescriptor)value); + } + break; + case IO: if (value == null) { unsetIo(); @@ -49490,6 +49079,9 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case IO: return getIo(); @@ -49504,6 +49096,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case IO: return isSetIo(); } @@ -49512,17 +49106,26 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof deleteNamespace_result) - return this.equals((deleteNamespace_result)that); + if (that instanceof getNamespaceDescriptor_result) + return this.equals((getNamespaceDescriptor_result)that); return false; } - public boolean equals(deleteNamespace_result that) { + public boolean equals(getNamespaceDescriptor_result that) { if (that == null) return false; if (this == that) return true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_io = true && this.isSetIo(); boolean that_present_io = true && that.isSetIo(); if (this_present_io || that_present_io) { @@ -49539,6 +49142,10 @@ public boolean equals(deleteNamespace_result that) { public int hashCode() { int hashCode = 1; + hashCode = hashCode * 8191 + ((isSetSuccess()) ? 
131071 : 524287); + if (isSetSuccess()) + hashCode = hashCode * 8191 + success.hashCode(); + hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287); if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode(); @@ -49547,13 +49154,23 @@ public int hashCode() { } @Override - public int compareTo(deleteNamespace_result other) { + public int compareTo(getNamespaceDescriptor_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = java.lang.Boolean.compare(isSetIo(), other.isSetIo()); if (lastComparison != 0) { return lastComparison; @@ -49582,9 +49199,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("deleteNamespace_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getNamespaceDescriptor_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("io:"); if (this.io == null) { sb.append("null"); @@ -49599,6 +49224,9 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -49617,15 +49245,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class deleteNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteNamespace_resultStandardScheme getScheme() { - return new deleteNamespace_resultStandardScheme(); + private static class getNamespaceDescriptor_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getNamespaceDescriptor_resultStandardScheme getScheme() { + return new getNamespaceDescriptor_resultStandardScheme(); } } - private static class deleteNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getNamespaceDescriptor_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -49635,6 +49263,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_res break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TNamespaceDescriptor(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // IO if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { struct.io = new TIOError(); @@ -49655,10 +49292,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.io != null) { oprot.writeFieldBegin(IO_FIELD_DESC); struct.io.write(oprot); @@ -49670,32 +49312,43 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_re } - private static class deleteNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public deleteNamespace_resultTupleScheme getScheme() { - return new deleteNamespace_resultTupleScheme(); + private static class getNamespaceDescriptor_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getNamespaceDescriptor_resultTupleScheme getScheme() { + return new getNamespaceDescriptor_resultTupleScheme(); } } - private static class deleteNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getNamespaceDescriptor_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetIo()) { + if (struct.isSetSuccess()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetIo()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetIo()) { struct.io.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { + struct.success = new TNamespaceDescriptor(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.io = new TIOError(); struct.io.read(iprot); struct.setIoIsSet(true); @@ -49708,25 +49361,17 @@ private static S scheme(org.apache. 
} } - public static class getNamespaceDescriptor_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_args"); + public static class listNamespaceDescriptors_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_args"); - private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_argsTupleSchemeFactory(); - /** - * name of namespace descriptor - */ - public @org.apache.thrift.annotation.Nullable java.lang.String name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * name of namespace descriptor - */ - NAME((short)1, "name"); +; private static final java.util.Map byName = new java.util.HashMap(); @@ -49742,8 +49387,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // NAME - return NAME; default: return null; } @@ -49783,95 +49426,38 @@ public java.lang.String getFieldName() { return _fieldName; } } - - // isset id assignments public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_args.class, metaDataMap); - } - - public getNamespaceDescriptor_args() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_args.class, metaDataMap); } - public getNamespaceDescriptor_args( - java.lang.String name) - { - this(); - this.name = name; + public listNamespaceDescriptors_args() { } /** * Performs a deep copy on other. 
*/ - public getNamespaceDescriptor_args(getNamespaceDescriptor_args other) { - if (other.isSetName()) { - this.name = other.name; - } + public listNamespaceDescriptors_args(listNamespaceDescriptors_args other) { } - public getNamespaceDescriptor_args deepCopy() { - return new getNamespaceDescriptor_args(this); + public listNamespaceDescriptors_args deepCopy() { + return new listNamespaceDescriptors_args(this); } @Override public void clear() { - this.name = null; - } - - /** - * name of namespace descriptor - */ - @org.apache.thrift.annotation.Nullable - public java.lang.String getName() { - return this.name; - } - - /** - * name of namespace descriptor - */ - public getNamespaceDescriptor_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) { - this.name = name; - return this; - } - - public void unsetName() { - this.name = null; - } - - /** Returns true if field name is set (has been assigned a value) and false otherwise */ - public boolean isSetName() { - return this.name != null; - } - - public void setNameIsSet(boolean value) { - if (!value) { - this.name = null; - } } public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { - case NAME: - if (value == null) { - unsetName(); - } else { - setName((java.lang.String)value); - } - break; - } } @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { - case NAME: - return getName(); - } throw new java.lang.IllegalStateException(); } @@ -49883,34 +49469,23 @@ public boolean isSet(_Fields field) { } switch (field) { - case NAME: - return isSetName(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof getNamespaceDescriptor_args) - return this.equals((getNamespaceDescriptor_args)that); + if (that instanceof listNamespaceDescriptors_args) + return this.equals((listNamespaceDescriptors_args)that); return false; } - public boolean equals(getNamespaceDescriptor_args that) { + public boolean equals(listNamespaceDescriptors_args that) { if (that == null) return false; if (this == that) return true; - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) - return false; - if (!this.name.equals(that.name)) - return false; - } - return true; } @@ -49918,31 +49493,17 @@ public boolean equals(getNamespaceDescriptor_args that) { public int hashCode() { int hashCode = 1; - hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287); - if (isSetName()) - hashCode = hashCode * 8191 + name.hashCode(); - return hashCode; } @Override - public int compareTo(getNamespaceDescriptor_args other) { + public int compareTo(listNamespaceDescriptors_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = java.lang.Boolean.compare(isSetName(), other.isSetName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -49961,25 +49522,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getNamespaceDescriptor_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaceDescriptors_args("); boolean first = true; - sb.append("name:"); - if (this.name == null) { - sb.append("null"); - } else { - sb.append(this.name); - } - first = false; sb.append(")"); return sb.toString(); } public void validate() throws org.apache.thrift.TException { // check for required fields - if (name == null) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString()); - } // check for sub-struct validity } @@ -49999,15 +49550,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getNamespaceDescriptor_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getNamespaceDescriptor_argsStandardScheme getScheme() { - return new getNamespaceDescriptor_argsStandardScheme(); + private static class listNamespaceDescriptors_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaceDescriptors_argsStandardScheme getScheme() { + return new listNamespaceDescriptors_argsStandardScheme(); } } - private static class getNamespaceDescriptor_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class listNamespaceDescriptors_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -50017,14 +49568,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip break; } switch (schemeField.id) { - case 1: // NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.name = iprot.readString(); - struct.setNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -50036,40 +49579,32 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(struct.name); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class getNamespaceDescriptor_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getNamespaceDescriptor_argsTupleScheme getScheme() { - return new getNamespaceDescriptor_argsTupleScheme(); + private static class listNamespaceDescriptors_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaceDescriptors_argsTupleScheme getScheme() { + return new listNamespaceDescriptors_argsTupleScheme(); } } - private static class 
getNamespaceDescriptor_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class listNamespaceDescriptors_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - oprot.writeString(struct.name); } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - struct.name = iprot.readString(); - struct.setNameIsSet(true); } } @@ -50078,16 +49613,16 @@ private static S scheme(org.apache. } } - public static class getNamespaceDescriptor_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_result"); + public static class listNamespaceDescriptors_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_resultTupleSchemeFactory(); - public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor success; // required + public @org.apache.thrift.annotation.Nullable java.util.List success; // required public @org.apache.thrift.annotation.Nullable TIOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -50158,18 +49693,19 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))); + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class)))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_result.class, metaDataMap); } - public getNamespaceDescriptor_result() { + public listNamespaceDescriptors_result() { } - public getNamespaceDescriptor_result( - TNamespaceDescriptor success, + public listNamespaceDescriptors_result( + java.util.List success, TIOError io) { this(); @@ -50180,17 +49716,21 @@ public getNamespaceDescriptor_result( /** * Performs a deep copy on other. */ - public getNamespaceDescriptor_result(getNamespaceDescriptor_result other) { + public listNamespaceDescriptors_result(listNamespaceDescriptors_result other) { if (other.isSetSuccess()) { - this.success = new TNamespaceDescriptor(other.success); + java.util.List __this__success = new java.util.ArrayList(other.success.size()); + for (TNamespaceDescriptor other_element : other.success) { + __this__success.add(new TNamespaceDescriptor(other_element)); + } + this.success = __this__success; } if (other.isSetIo()) { this.io = new TIOError(other.io); } } - public getNamespaceDescriptor_result deepCopy() { - return new getNamespaceDescriptor_result(this); + public listNamespaceDescriptors_result deepCopy() { + return new listNamespaceDescriptors_result(this); } @Override @@ -50199,12 +49739,28 @@ public void clear() { this.io = null; } + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + @org.apache.thrift.annotation.Nullable - public TNamespaceDescriptor getSuccess() { + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(TNamespaceDescriptor elem) { + if (this.success == null) { + this.success = new java.util.ArrayList(); + } + this.success.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getSuccess() { return this.success; } - public getNamespaceDescriptor_result setSuccess(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor success) { + public listNamespaceDescriptors_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { this.success = success; return this; } @@ -50229,7 +49785,7 @@ public TIOError getIo() { return this.io; } - public getNamespaceDescriptor_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public listNamespaceDescriptors_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -50255,7 +49811,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetSuccess(); } else { - setSuccess((TNamespaceDescriptor)value); + setSuccess((java.util.List)value); } break; @@ -50300,12 +49856,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof getNamespaceDescriptor_result) - return this.equals((getNamespaceDescriptor_result)that); + if (that instanceof listNamespaceDescriptors_result) + return this.equals((listNamespaceDescriptors_result)that); return false; } - public boolean equals(getNamespaceDescriptor_result that) { + public boolean equals(listNamespaceDescriptors_result that) { if (that == null) return false; if (this == that) @@ -50348,7 +49904,7 @@ public int hashCode() { } @Override - public int compareTo(getNamespaceDescriptor_result other) { + public int compareTo(listNamespaceDescriptors_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -50393,7 +49949,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getNamespaceDescriptor_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaceDescriptors_result("); boolean first = true; sb.append("success:"); @@ -50418,9 +49974,6 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -50439,15 +49992,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getNamespaceDescriptor_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getNamespaceDescriptor_resultStandardScheme getScheme() { - return new getNamespaceDescriptor_resultStandardScheme(); + private static class listNamespaceDescriptors_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaceDescriptors_resultStandardScheme getScheme() { + return new listNamespaceDescriptors_resultStandardScheme(); } } - private static class getNamespaceDescriptor_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class listNamespaceDescriptors_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -50458,9 +50011,19 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TNamespaceDescriptor(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list326 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list326.size); + @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem327; + for (int _i328 = 0; _i328 < _list326.size; ++_i328) + { + _elem327 = new TNamespaceDescriptor(); + _elem327.read(iprot); + struct.success.add(_elem327); + } + iprot.readListEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -50486,13 +50049,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (TNamespaceDescriptor _iter329 : struct.success) + { + _iter329.write(oprot); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.io 
!= null) { @@ -50506,16 +50076,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescri } - private static class getNamespaceDescriptor_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getNamespaceDescriptor_resultTupleScheme getScheme() { - return new getNamespaceDescriptor_resultTupleScheme(); + private static class listNamespaceDescriptors_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaceDescriptors_resultTupleScheme getScheme() { + return new listNamespaceDescriptors_resultTupleScheme(); } } - private static class getNamespaceDescriptor_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class listNamespaceDescriptors_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetSuccess()) { @@ -50526,7 +50096,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescrip } oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - struct.success.write(oprot); + { + oprot.writeI32(struct.success.size()); + for (TNamespaceDescriptor _iter330 : struct.success) + { + _iter330.write(oprot); + } + } } if (struct.isSetIo()) { struct.io.write(oprot); @@ -50534,12 +50110,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescrip } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = new TNamespaceDescriptor(); - struct.success.read(iprot); + { + org.apache.thrift.protocol.TList _list331 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list331.size); + @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem332; + for (int _i333 = 0; _i333 < _list331.size; ++_i333) + { + _elem332 = new TNamespaceDescriptor(); + _elem332.read(iprot); + struct.success.add(_elem332); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -50555,12 +50140,12 @@ private static S scheme(org.apache. 
} } - public static class listNamespaceDescriptors_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_args"); + public static class listNamespaces_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_args"); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_argsTupleSchemeFactory(); /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -50624,20 +50209,20 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_args.class, metaDataMap); } - public listNamespaceDescriptors_args() { + public listNamespaces_args() { } /** * Performs a deep copy on other. */ - public listNamespaceDescriptors_args(listNamespaceDescriptors_args other) { + public listNamespaces_args(listNamespaces_args other) { } - public listNamespaceDescriptors_args deepCopy() { - return new listNamespaceDescriptors_args(this); + public listNamespaces_args deepCopy() { + return new listNamespaces_args(this); } @Override @@ -50669,12 +50254,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof listNamespaceDescriptors_args) - return this.equals((listNamespaceDescriptors_args)that); + if (that instanceof listNamespaces_args) + return this.equals((listNamespaces_args)that); return false; } - public boolean equals(listNamespaceDescriptors_args that) { + public boolean equals(listNamespaces_args that) { if (that == null) return false; if (this == that) @@ -50691,7 +50276,7 @@ public int hashCode() { } @Override - public int compareTo(listNamespaceDescriptors_args other) { + public int compareTo(listNamespaces_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -50716,7 +50301,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaceDescriptors_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaces_args("); boolean first = true; sb.append(")"); @@ -50744,15 +50329,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class listNamespaceDescriptors_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaceDescriptors_argsStandardScheme getScheme() { - return new listNamespaceDescriptors_argsStandardScheme(); + private static class listNamespaces_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaces_argsStandardScheme getScheme() { + return new listNamespaces_argsStandardScheme(); } } - private static class listNamespaceDescriptors_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class listNamespaces_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -50773,7 +50358,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -50783,21 +50368,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescr } - private static class listNamespaceDescriptors_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaceDescriptors_argsTupleScheme getScheme() { - return new listNamespaceDescriptors_argsTupleScheme(); + private static class listNamespaces_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaces_argsTupleScheme getScheme() { + return new listNamespaces_argsTupleScheme(); } } - private static class listNamespaceDescriptors_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class listNamespaces_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } } @@ -50807,16 +50392,16 @@ private static S scheme(org.apache. 
} } - public static class listNamespaceDescriptors_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_result"); + public static class listNamespaces_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_resultTupleSchemeFactory(); - public @org.apache.thrift.annotation.Nullable java.util.List success; // required + public @org.apache.thrift.annotation.Nullable java.util.List success; // required public @org.apache.thrift.annotation.Nullable TIOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -50888,18 +50473,18 @@ public java.lang.String getFieldName() { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class)))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_result.class, metaDataMap); } - public listNamespaceDescriptors_result() { + public listNamespaces_result() { } - public listNamespaceDescriptors_result( - java.util.List success, + public listNamespaces_result( + java.util.List success, TIOError io) { this(); @@ -50910,12 +50495,9 @@ public listNamespaceDescriptors_result( /** * Performs a deep copy on other. 
*/ - public listNamespaceDescriptors_result(listNamespaceDescriptors_result other) { + public listNamespaces_result(listNamespaces_result other) { if (other.isSetSuccess()) { - java.util.List __this__success = new java.util.ArrayList(other.success.size()); - for (TNamespaceDescriptor other_element : other.success) { - __this__success.add(new TNamespaceDescriptor(other_element)); - } + java.util.List __this__success = new java.util.ArrayList(other.success); this.success = __this__success; } if (other.isSetIo()) { @@ -50923,8 +50505,8 @@ public listNamespaceDescriptors_result(listNamespaceDescriptors_result other) { } } - public listNamespaceDescriptors_result deepCopy() { - return new listNamespaceDescriptors_result(this); + public listNamespaces_result deepCopy() { + return new listNamespaces_result(this); } @Override @@ -50938,23 +50520,23 @@ public int getSuccessSize() { } @org.apache.thrift.annotation.Nullable - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? null : this.success.iterator(); } - public void addToSuccess(TNamespaceDescriptor elem) { + public void addToSuccess(java.lang.String elem) { if (this.success == null) { - this.success = new java.util.ArrayList(); + this.success = new java.util.ArrayList(); } this.success.add(elem); } @org.apache.thrift.annotation.Nullable - public java.util.List getSuccess() { + public java.util.List getSuccess() { return this.success; } - public listNamespaceDescriptors_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { + public listNamespaces_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { this.success = success; return this; } @@ -50979,7 +50561,7 @@ public TIOError getIo() { return this.io; } - public listNamespaceDescriptors_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { + public listNamespaces_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { this.io = io; return this; } @@ -51005,7 +50587,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetSuccess(); } else { - setSuccess((java.util.List)value); + setSuccess((java.util.List)value); } break; @@ -51050,12 +50632,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof listNamespaceDescriptors_result) - return this.equals((listNamespaceDescriptors_result)that); + if (that instanceof listNamespaces_result) + return this.equals((listNamespaces_result)that); return false; } - public boolean equals(listNamespaceDescriptors_result that) { + public boolean equals(listNamespaces_result that) { if (that == null) return false; if (this == that) @@ -51098,7 +50680,7 @@ public int hashCode() { } @Override - public int compareTo(listNamespaceDescriptors_result other) { + public int compareTo(listNamespaces_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -51143,7 +50725,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaceDescriptors_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaces_result("); boolean first = true; sb.append("success:"); @@ -51186,15 +50768,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class listNamespaceDescriptors_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaceDescriptors_resultStandardScheme getScheme() { - return new listNamespaceDescriptors_resultStandardScheme(); + private static class listNamespaces_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaces_resultStandardScheme getScheme() { + return new listNamespaces_resultStandardScheme(); } } - private static class listNamespaceDescriptors_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class listNamespaces_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -51208,12 +50790,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list334 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list334.size); - @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem335; + struct.success = new java.util.ArrayList(_list334.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem335; for (int _i336 = 0; _i336 < _list334.size; ++_i336) { - _elem335 = new TNamespaceDescriptor(); - _elem335.read(iprot); + _elem335 = iprot.readString(); struct.success.add(_elem335); } iprot.readListEnd(); @@ -51243,17 +50824,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TNamespaceDescriptor _iter337 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (java.lang.String _iter337 : struct.success) { - _iter337.write(oprot); + oprot.writeString(_iter337); } oprot.writeListEnd(); } @@ -51270,16 +50851,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescr } - private static class listNamespaceDescriptors_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaceDescriptors_resultTupleScheme getScheme() { - return new listNamespaceDescriptors_resultTupleScheme(); + private static class 
listNamespaces_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public listNamespaces_resultTupleScheme getScheme() { + return new listNamespaces_resultTupleScheme(); } } - private static class listNamespaceDescriptors_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class listNamespaces_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetSuccess()) { @@ -51292,9 +50873,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescri if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TNamespaceDescriptor _iter338 : struct.success) + for (java.lang.String _iter338 : struct.success) { - _iter338.write(oprot); + oprot.writeString(_iter338); } } } @@ -51304,18 +50885,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescri } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list339 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list339.size); - @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem340; + org.apache.thrift.protocol.TList _list339 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list339.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem340; for (int _i341 = 0; _i341 < _list339.size; ++_i341) { - _elem340 = new TNamespaceDescriptor(); - _elem340.read(iprot); + _elem340 = iprot.readString(); struct.success.add(_elem340); } } @@ -51334,12 +50914,12 @@ private static S scheme(org.apache. 
} } - public static class listNamespaces_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_args"); + public static class getThriftServerType_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_args"); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_argsTupleSchemeFactory(); /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -51403,20 +50983,20 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class, metaDataMap); } - public listNamespaces_args() { + public getThriftServerType_args() { } /** * Performs a deep copy on other. */ - public listNamespaces_args(listNamespaces_args other) { + public getThriftServerType_args(getThriftServerType_args other) { } - public listNamespaces_args deepCopy() { - return new listNamespaces_args(this); + public getThriftServerType_args deepCopy() { + return new getThriftServerType_args(this); } @Override @@ -51448,12 +51028,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof listNamespaces_args) - return this.equals((listNamespaces_args)that); + if (that instanceof getThriftServerType_args) + return this.equals((getThriftServerType_args)that); return false; } - public boolean equals(listNamespaces_args that) { + public boolean equals(getThriftServerType_args that) { if (that == null) return false; if (this == that) @@ -51470,7 +51050,7 @@ public int hashCode() { } @Override - public int compareTo(listNamespaces_args other) { + public int compareTo(getThriftServerType_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -51495,7 +51075,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaces_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getThriftServerType_args("); boolean first = true; sb.append(")"); @@ -51523,15 +51103,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class listNamespaces_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaces_argsStandardScheme getScheme() { - return new listNamespaces_argsStandardScheme(); + private static class getThriftServerType_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getThriftServerType_argsStandardScheme getScheme() { + return new getThriftServerType_argsStandardScheme(); } } - private static class listNamespaces_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getThriftServerType_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -51552,7 +51132,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -51562,21 +51142,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_arg } - private static class listNamespaces_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaces_argsTupleScheme getScheme() { - return new listNamespaces_argsTupleScheme(); + private static class getThriftServerType_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getThriftServerType_argsTupleScheme getScheme() { + return new getThriftServerType_argsTupleScheme(); } } - private static class listNamespaces_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getThriftServerType_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } } @@ -51586,22 +51166,27 @@ private static S scheme(org.apache. 
} } - public static class listNamespaces_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_result"); + public static class getThriftServerType_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_resultTupleSchemeFactory(); - public @org.apache.thrift.annotation.Nullable java.util.List success; // required - public @org.apache.thrift.annotation.Nullable TIOError io; // required + /** + * + * @see TThriftServerType + */ + public @org.apache.thrift.annotation.Nullable TThriftServerType success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - IO((short)1, "io"); + /** + * + * @see TThriftServerType + */ + SUCCESS((short)0, "success"); private static final java.util.Map byName = new java.util.HashMap(); @@ -51619,8 +51204,6 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // IO - return IO; default: return null; } @@ -51666,71 +51249,53 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class))); + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TThriftServerType.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_result.class, metaDataMap); } - public listNamespaces_result() { + public getThriftServerType_result() { } - public listNamespaces_result( - java.util.List success, - TIOError io) + public getThriftServerType_result( + TThriftServerType success) { this(); this.success = success; - this.io = io; } /** * Performs a deep copy on other. */ - public listNamespaces_result(listNamespaces_result other) { + public getThriftServerType_result(getThriftServerType_result other) { if (other.isSetSuccess()) { - java.util.List __this__success = new java.util.ArrayList(other.success); - this.success = __this__success; - } - if (other.isSetIo()) { - this.io = new TIOError(other.io); + this.success = other.success; } } - public listNamespaces_result deepCopy() { - return new listNamespaces_result(this); + public getThriftServerType_result deepCopy() { + return new getThriftServerType_result(this); } @Override public void clear() { this.success = null; - this.io = null; - } - - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(java.lang.String elem) { - if (this.success == null) { - this.success = new java.util.ArrayList(); - } - this.success.add(elem); } + /** + * + * @see TThriftServerType + */ @org.apache.thrift.annotation.Nullable - public java.util.List getSuccess() { + public TThriftServerType getSuccess() { return this.success; } - public listNamespaces_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { + /** + * + * @see TThriftServerType + */ + public getThriftServerType_result setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) { this.success = success; return this; } @@ -51750,46 +51315,13 @@ public void setSuccessIsSet(boolean value) { } } - @org.apache.thrift.annotation.Nullable - public TIOError getIo() { - return this.io; - } - - public listNamespaces_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) { - this.io = io; - return this; - } - - public void unsetIo() { - this.io = null; - } - - /** Returns true if field io is set (has been assigned a value) and false otherwise */ - public boolean isSetIo() { - return this.io != null; - } - - public void setIoIsSet(boolean value) { - if (!value) { - this.io = null; - } - } - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((java.util.List)value); - } - break; - - case IO: - if (value == null) { - unsetIo(); - } else { - setIo((TIOError)value); + setSuccess((TThriftServerType)value); } break; @@ -51802,9 +51334,6 @@ public java.lang.Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); - case IO: - return getIo(); - } throw new java.lang.IllegalStateException(); } @@ -51818,20 +51347,18 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); - case IO: - return isSetIo(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof listNamespaces_result) - return this.equals((listNamespaces_result)that); + if (that instanceof getThriftServerType_result) + return this.equals((getThriftServerType_result)that); return false; } - public boolean equals(listNamespaces_result that) { + public boolean equals(getThriftServerType_result that) { if (that == null) return false; if (this == that) @@ -51846,15 +51373,6 @@ public boolean equals(listNamespaces_result that) { return false; } - boolean this_present_io = true && this.isSetIo(); - boolean that_present_io = true && that.isSetIo(); - if (this_present_io || that_present_io) { - if (!(this_present_io && that_present_io)) - return false; - if (!this.io.equals(that.io)) - return false; - } - return true; } @@ -51864,17 +51382,13 @@ public int hashCode() { hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287); if (isSetSuccess()) - hashCode = hashCode * 8191 + success.hashCode(); - - hashCode = hashCode * 8191 + ((isSetIo()) ? 
131071 : 524287); - if (isSetIo()) - hashCode = hashCode * 8191 + io.hashCode(); + hashCode = hashCode * 8191 + success.getValue(); return hashCode; } @Override - public int compareTo(listNamespaces_result other) { + public int compareTo(getThriftServerType_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -51891,16 +51405,6 @@ public int compareTo(listNamespaces_result other) { return lastComparison; } } - lastComparison = java.lang.Boolean.compare(isSetIo(), other.isSetIo()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIo()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.io, other.io); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -51919,7 +51423,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("listNamespaces_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getThriftServerType_result("); boolean first = true; sb.append("success:"); @@ -51929,14 +51433,6 @@ public java.lang.String toString() { sb.append(this.success); } first = false; - if (!first) sb.append(", "); - sb.append("io:"); - if (this.io == null) { - sb.append("null"); - } else { - sb.append(this.io); - } - first = false; sb.append(")"); return sb.toString(); } @@ -51962,15 +51458,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class listNamespaces_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaces_resultStandardScheme getScheme() { - return new listNamespaces_resultStandardScheme(); + private static class getThriftServerType_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getThriftServerType_resultStandardScheme getScheme() { + return new getThriftServerType_resultStandardScheme(); } } - private static class listNamespaces_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getThriftServerType_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -51981,32 +51477,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_resu } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list342 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list342.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem343; - for (int _i344 = 0; _i344 < _list342.size; ++_i344) - { - _elem343 = iprot.readString(); - struct.success.add(_elem343); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32()); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // IO - if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { - struct.io = new TIOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -52018,25 +51495,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_resu struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter345 : struct.success) - { - oprot.writeString(_iter345); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.io != null) { - oprot.writeFieldBegin(IO_FIELD_DESC); - struct.io.write(oprot); + oprot.writeI32(struct.success.getValue()); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -52045,61 +51510,35 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_res } - private static class listNamespaces_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public listNamespaces_resultTupleScheme getScheme() { - return new listNamespaces_resultTupleScheme(); + private static class getThriftServerType_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getThriftServerType_resultTupleScheme getScheme() { + return new getThriftServerType_resultTupleScheme(); } } - private static class listNamespaces_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getThriftServerType_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetIo()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); + oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (java.lang.String _iter346 : struct.success) - { - oprot.writeString(_iter346); - } - } - } - if (struct.isSetIo()) { - struct.io.write(oprot); + oprot.writeI32(struct.success.getValue()); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list347 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list347.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem348; - for (int _i349 = 0; _i349 < _list347.size; ++_i349) - { - _elem348 = iprot.readString(); - struct.success.add(_elem348); - } - } + struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32()); struct.setSuccessIsSet(true); } - if (incoming.get(1)) { - struct.io = new TIOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } } } @@ -52108,12 +51547,12 @@ private static S scheme(org.apache. } } - public static class getThriftServerType_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_args"); + public static class getClusterId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_args"); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_argsTupleSchemeFactory(); /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -52177,20 +51616,20 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class, metaDataMap); } - public getThriftServerType_args() { + public getClusterId_args() { } /** * Performs a deep copy on other. 
*/ - public getThriftServerType_args(getThriftServerType_args other) { + public getClusterId_args(getClusterId_args other) { } - public getThriftServerType_args deepCopy() { - return new getThriftServerType_args(this); + public getClusterId_args deepCopy() { + return new getClusterId_args(this); } @Override @@ -52222,12 +51661,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof getThriftServerType_args) - return this.equals((getThriftServerType_args)that); + if (that instanceof getClusterId_args) + return this.equals((getClusterId_args)that); return false; } - public boolean equals(getThriftServerType_args that) { + public boolean equals(getClusterId_args that) { if (that == null) return false; if (this == that) @@ -52244,7 +51683,7 @@ public int hashCode() { } @Override - public int compareTo(getThriftServerType_args other) { + public int compareTo(getClusterId_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -52269,7 +51708,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getThriftServerType_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getClusterId_args("); boolean first = true; sb.append(")"); @@ -52297,15 +51736,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getThriftServerType_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getThriftServerType_argsStandardScheme getScheme() { - return new getThriftServerType_argsStandardScheme(); + private static class getClusterId_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getClusterId_argsStandardScheme getScheme() { + return new getClusterId_argsStandardScheme(); } } - private static class getThriftServerType_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getClusterId_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -52326,7 +51765,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -52336,21 +51775,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp } - private static class getThriftServerType_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getThriftServerType_argsTupleScheme getScheme() { - return new getThriftServerType_argsTupleScheme(); + private static class getClusterId_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getClusterId_argsTupleScheme getScheme() { + return new 
getClusterId_argsTupleScheme(); } } - private static class getThriftServerType_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getClusterId_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } } @@ -52360,26 +51799,18 @@ private static S scheme(org.apache. } } - public static class getThriftServerType_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_result"); + public static class getClusterId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_resultTupleSchemeFactory(); - /** - * - * @see TThriftServerType - */ - public @org.apache.thrift.annotation.Nullable TThriftServerType success; // required + public @org.apache.thrift.annotation.Nullable java.lang.String success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TThriftServerType - */ SUCCESS((short)0, "success"); private static final java.util.Map byName = new java.util.HashMap(); @@ -52443,16 +51874,16 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TThriftServerType.class))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class, metaDataMap); } - public getThriftServerType_result() { + public getClusterId_result() { } - public getThriftServerType_result( - TThriftServerType success) + public getClusterId_result( + java.lang.String success) { this(); this.success = success; @@ -52461,14 +51892,14 @@ public getThriftServerType_result( /** * Performs a deep copy on other. */ - public getThriftServerType_result(getThriftServerType_result other) { + public getClusterId_result(getClusterId_result other) { if (other.isSetSuccess()) { this.success = other.success; } } - public getThriftServerType_result deepCopy() { - return new getThriftServerType_result(this); + public getClusterId_result deepCopy() { + return new getClusterId_result(this); } @Override @@ -52476,20 +51907,12 @@ public void clear() { this.success = null; } - /** - * - * @see TThriftServerType - */ @org.apache.thrift.annotation.Nullable - public TThriftServerType getSuccess() { + public java.lang.String getSuccess() { return this.success; } - /** - * - * @see TThriftServerType - */ - public getThriftServerType_result setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) { + public getClusterId_result setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) { this.success = success; return this; } @@ -52515,7 +51938,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetSuccess(); } else { - setSuccess((TThriftServerType)value); + setSuccess((java.lang.String)value); } break; @@ -52547,12 +51970,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof getThriftServerType_result) - return this.equals((getThriftServerType_result)that); + if (that instanceof getClusterId_result) + return this.equals((getClusterId_result)that); return false; } - public boolean equals(getThriftServerType_result that) { + public boolean equals(getClusterId_result that) { if (that == null) return false; if (this == that) @@ -52576,13 +51999,13 @@ public int hashCode() { hashCode = hashCode * 8191 + ((isSetSuccess()) ? 
131071 : 524287); if (isSetSuccess()) - hashCode = hashCode * 8191 + success.getValue(); + hashCode = hashCode * 8191 + success.hashCode(); return hashCode; } @Override - public int compareTo(getThriftServerType_result other) { + public int compareTo(getClusterId_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -52617,7 +52040,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getThriftServerType_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getClusterId_result("); boolean first = true; sb.append("success:"); @@ -52652,15 +52075,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getThriftServerType_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getThriftServerType_resultStandardScheme getScheme() { - return new getThriftServerType_resultStandardScheme(); + private static class getClusterId_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getClusterId_resultStandardScheme getScheme() { + return new getClusterId_resultStandardScheme(); } } - private static class getThriftServerType_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getClusterId_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -52671,8 +52094,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32()); + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -52689,13 +52112,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeI32(struct.success.getValue()); + oprot.writeString(struct.success); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -52704,16 +52127,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp } - private static class getThriftServerType_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getThriftServerType_resultTupleScheme getScheme() { - return new getThriftServerType_resultTupleScheme(); + private static class getClusterId_resultTupleSchemeFactory implements 
org.apache.thrift.scheme.SchemeFactory { + public getClusterId_resultTupleScheme getScheme() { + return new getClusterId_resultTupleScheme(); } } - private static class getThriftServerType_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getClusterId_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetSuccess()) { @@ -52721,16 +52144,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType } oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - oprot.writeI32(struct.success.getValue()); + oprot.writeString(struct.success); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32()); + struct.success = iprot.readString(); struct.setSuccessIsSet(true); } } @@ -52741,17 +52164,34 @@ private static S scheme(org.apache. } } - public static class getClusterId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_args"); + public static class getSlowLogResponses_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSlowLogResponses_args"); + private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET, (short)1); + private static final org.apache.thrift.protocol.TField LOG_QUERY_FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("logQueryFilter", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getSlowLogResponses_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getSlowLogResponses_argsTupleSchemeFactory(); + /** + * @param serverNames Server names to get slowlog responses from + */ + public @org.apache.thrift.annotation.Nullable java.util.Set serverNames; // required + /** + * @param logQueryFilter filter to be used if provided + */ + public @org.apache.thrift.annotation.Nullable TLogQueryFilter logQueryFilter; // required /** The set of fields this 
struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + /** + * @param serverNames Server names to get slowlog responses from + */ + SERVER_NAMES((short)1, "serverNames"), + /** + * @param logQueryFilter filter to be used if provided + */ + LOG_QUERY_FILTER((short)2, "logQueryFilter"); private static final java.util.Map byName = new java.util.HashMap(); @@ -52767,644 +52207,10 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByName(java.lang.String name) { - return byName.get(name); - } - - private final short _thriftId; - private final java.lang.String _fieldName; - - _Fields(short thriftId, java.lang.String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public java.lang.String getFieldName() { - return _fieldName; - } - } - public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class, metaDataMap); - } - - public getClusterId_args() { - } - - /** - * Performs a deep copy on other. 
- */ - public getClusterId_args(getClusterId_args other) { - } - - public getClusterId_args deepCopy() { - return new getClusterId_args(this); - } - - @Override - public void clear() { - } - - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { - switch (field) { - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.Object getFieldValue(_Fields field) { - switch (field) { - } - throw new java.lang.IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new java.lang.IllegalArgumentException(); - } - - switch (field) { - } - throw new java.lang.IllegalStateException(); - } - - @Override - public boolean equals(java.lang.Object that) { - if (that instanceof getClusterId_args) - return this.equals((getClusterId_args)that); - return false; - } - - public boolean equals(getClusterId_args that) { - if (that == null) - return false; - if (this == that) - return true; - - return true; - } - - @Override - public int hashCode() { - int hashCode = 1; - - return hashCode; - } - - @Override - public int compareTo(getClusterId_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - return 0; - } - - @org.apache.thrift.annotation.Nullable - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - scheme(iprot).read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - scheme(oprot).write(oprot, this); - } - - @Override - public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getClusterId_args("); - boolean first = true; - - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getClusterId_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getClusterId_argsStandardScheme getScheme() { - return new getClusterId_argsStandardScheme(); - } - } - - private static class getClusterId_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - default: - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getClusterId_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getClusterId_argsTupleScheme getScheme() { - return new getClusterId_argsTupleScheme(); - } - } - - private static class getClusterId_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - } - } - - private static S scheme(org.apache.thrift.protocol.TProtocol proto) { - return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); - } - } - - public static class getClusterId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); - - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_resultTupleSchemeFactory(); - - public @org.apache.thrift.annotation.Nullable java.lang.String success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final java.util.Map byName = new java.util.HashMap(); - - static { - for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByName(java.lang.String name) { - return byName.get(name); - } - - private final short _thriftId; - private final java.lang.String _fieldName; - - _Fields(short thriftId, java.lang.String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public java.lang.String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class, metaDataMap); - } - - public getClusterId_result() { - } - - public getClusterId_result( - java.lang.String success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public getClusterId_result(getClusterId_result other) { - if (other.isSetSuccess()) { - this.success = other.success; - } - } - - public getClusterId_result deepCopy() { - return new getClusterId_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - @org.apache.thrift.annotation.Nullable - public java.lang.String getSuccess() { - return this.success; - } - - public getClusterId_result setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) { - this.success = success; - return this; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((java.lang.String)value); - } - break; - - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new java.lang.IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new java.lang.IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new java.lang.IllegalStateException(); - } - - @Override - public boolean equals(java.lang.Object that) { - if (that instanceof getClusterId_result) - return this.equals((getClusterId_result)that); - return false; - } - - public boolean equals(getClusterId_result that) { - if (that == null) - return false; - if (this == that) - return true; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if 
(!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - int hashCode = 1; - - hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287); - if (isSetSuccess()) - hashCode = hashCode * 8191 + success.hashCode(); - - return hashCode; - } - - @Override - public int compareTo(getClusterId_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - @org.apache.thrift.annotation.Nullable - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - scheme(iprot).read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - scheme(oprot).write(oprot, this); - } - - @Override - public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getClusterId_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getClusterId_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getClusterId_resultStandardScheme getScheme() { - return new getClusterId_resultStandardScheme(); - } - } - - private static class getClusterId_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.success = iprot.readString(); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - 
iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(struct.success); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getClusterId_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getClusterId_resultTupleScheme getScheme() { - return new getClusterId_resultTupleScheme(); - } - } - - private static class getClusterId_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - oprot.writeString(struct.success); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = iprot.readString(); - struct.setSuccessIsSet(true); - } - } - } - - private static S scheme(org.apache.thrift.protocol.TProtocol proto) { - return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); - } - } - - public static class getSlowLogResponses_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSlowLogResponses_args"); - - private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET, (short)1); - private static final org.apache.thrift.protocol.TField LOG_QUERY_FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("logQueryFilter", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getSlowLogResponses_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getSlowLogResponses_argsTupleSchemeFactory(); - - /** - * @param serverNames Server names to get slowlog responses from - */ - public @org.apache.thrift.annotation.Nullable java.util.Set serverNames; // required - /** - * @param logQueryFilter filter to be used if provided - */ - public @org.apache.thrift.annotation.Nullable TLogQueryFilter logQueryFilter; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * @param serverNames Server names to get slowlog responses from - */ - SERVER_NAMES((short)1, "serverNames"), - /** - * @param logQueryFilter filter to be used if provided - */ - LOG_QUERY_FILTER((short)2, "logQueryFilter"); - - private static final java.util.Map byName = new java.util.HashMap(); - - static { - for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SERVER_NAMES - return SERVER_NAMES; - case 2: // LOG_QUERY_FILTER - return LOG_QUERY_FILTER; + case 1: // SERVER_NAMES + return SERVER_NAMES; + case 2: // LOG_QUERY_FILTER + return LOG_QUERY_FILTER; default: return null; } @@ -53785,14 +52591,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses case 1: // SERVER_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set350 = iprot.readSetBegin(); - struct.serverNames = new java.util.HashSet(2*_set350.size); - @org.apache.thrift.annotation.Nullable TServerName _elem351; - for (int _i352 = 0; _i352 < _set350.size; ++_i352) + org.apache.thrift.protocol.TSet _set342 = iprot.readSetBegin(); + struct.serverNames = new java.util.HashSet(2*_set342.size); + @org.apache.thrift.annotation.Nullable TServerName _elem343; + for (int _i344 = 0; _i344 < _set342.size; ++_i344) { - _elem351 = new TServerName(); - _elem351.read(iprot); - struct.serverNames.add(_elem351); + _elem343 = new TServerName(); + _elem343.read(iprot); + struct.serverNames.add(_elem343); } iprot.readSetEnd(); } @@ -53829,9 +52635,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponse oprot.writeFieldBegin(SERVER_NAMES_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size())); - for (TServerName _iter353 : struct.serverNames) + for (TServerName _iter345 : struct.serverNames) { - _iter353.write(oprot); + _iter345.write(oprot); } oprot.writeSetEnd(); } @@ -53870,9 +52676,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses if (struct.isSetServerNames()) { { oprot.writeI32(struct.serverNames.size()); - for (TServerName _iter354 : struct.serverNames) + for (TServerName _iter346 : struct.serverNames) { - _iter354.write(oprot); + _iter346.write(oprot); } } } @@ -53887,14 +52693,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_ java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TSet _set355 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.serverNames = new java.util.HashSet(2*_set355.size); - @org.apache.thrift.annotation.Nullable TServerName _elem356; - for (int _i357 = 0; _i357 < _set355.size; ++_i357) + org.apache.thrift.protocol.TSet _set347 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.serverNames = new java.util.HashSet(2*_set347.size); + @org.apache.thrift.annotation.Nullable TServerName _elem348; + for (int _i349 = 0; _i349 < _set347.size; ++_i349) { - _elem356 = new TServerName(); - _elem356.read(iprot); - struct.serverNames.add(_elem356); + _elem348 = new TServerName(); + _elem348.read(iprot); + 
struct.serverNames.add(_elem348); } } struct.setServerNamesIsSet(true); @@ -54312,14 +53118,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list358 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list358.size); - @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem359; - for (int _i360 = 0; _i360 < _list358.size; ++_i360) + org.apache.thrift.protocol.TList _list350 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list350.size); + @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem351; + for (int _i352 = 0; _i352 < _list350.size; ++_i352) { - _elem359 = new TOnlineLogRecord(); - _elem359.read(iprot); - struct.success.add(_elem359); + _elem351 = new TOnlineLogRecord(); + _elem351.read(iprot); + struct.success.add(_elem351); } iprot.readListEnd(); } @@ -54356,9 +53162,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponse oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TOnlineLogRecord _iter361 : struct.success) + for (TOnlineLogRecord _iter353 : struct.success) { - _iter361.write(oprot); + _iter353.write(oprot); } oprot.writeListEnd(); } @@ -54397,9 +53203,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TOnlineLogRecord _iter362 : struct.success) + for (TOnlineLogRecord _iter354 : struct.success) { - _iter362.write(oprot); + _iter354.write(oprot); } } } @@ -54414,14 +53220,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_ java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list363 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list363.size); - @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem364; - for (int _i365 = 0; _i365 < _list363.size; ++_i365) + org.apache.thrift.protocol.TList _list355 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list355.size); + @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem356; + for (int _i357 = 0; _i357 < _list355.size; ++_i357) { - _elem364 = new TOnlineLogRecord(); - _elem364.read(iprot); - struct.success.add(_elem364); + _elem356 = new TOnlineLogRecord(); + _elem356.read(iprot); + struct.success.add(_elem356); } } struct.setSuccessIsSet(true); @@ -54769,14 +53575,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons case 1: // SERVER_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set366 = iprot.readSetBegin(); - struct.serverNames = new java.util.HashSet(2*_set366.size); - @org.apache.thrift.annotation.Nullable TServerName _elem367; - for (int _i368 = 0; _i368 < _set366.size; ++_i368) + org.apache.thrift.protocol.TSet _set358 = iprot.readSetBegin(); + struct.serverNames = new java.util.HashSet(2*_set358.size); + @org.apache.thrift.annotation.Nullable TServerName _elem359; + for (int _i360 = 0; _i360 < _set358.size; ++_i360) { - _elem367 = new TServerName(); - _elem367.read(iprot); - struct.serverNames.add(_elem367); + _elem359 = new 
TServerName(); + _elem359.read(iprot); + struct.serverNames.add(_elem359); } iprot.readSetEnd(); } @@ -54804,9 +53610,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogRespon oprot.writeFieldBegin(SERVER_NAMES_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size())); - for (TServerName _iter369 : struct.serverNames) + for (TServerName _iter361 : struct.serverNames) { - _iter369.write(oprot); + _iter361.write(oprot); } oprot.writeSetEnd(); } @@ -54837,9 +53643,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons if (struct.isSetServerNames()) { { oprot.writeI32(struct.serverNames.size()); - for (TServerName _iter370 : struct.serverNames) + for (TServerName _iter362 : struct.serverNames) { - _iter370.write(oprot); + _iter362.write(oprot); } } } @@ -54851,14 +53657,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponse java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TSet _set371 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.serverNames = new java.util.HashSet(2*_set371.size); - @org.apache.thrift.annotation.Nullable TServerName _elem372; - for (int _i373 = 0; _i373 < _set371.size; ++_i373) + org.apache.thrift.protocol.TSet _set363 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.serverNames = new java.util.HashSet(2*_set363.size); + @org.apache.thrift.annotation.Nullable TServerName _elem364; + for (int _i365 = 0; _i365 < _set363.size; ++_i365) { - _elem372 = new TServerName(); - _elem372.read(iprot); - struct.serverNames.add(_elem372); + _elem364 = new TServerName(); + _elem364.read(iprot); + struct.serverNames.add(_elem364); } } struct.setServerNamesIsSet(true); @@ -55268,13 +54074,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list374 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list374.size); - boolean _elem375; - for (int _i376 = 0; _i376 < _list374.size; ++_i376) + org.apache.thrift.protocol.TList _list366 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list366.size); + boolean _elem367; + for (int _i368 = 0; _i368 < _list366.size; ++_i368) { - _elem375 = iprot.readBool(); - struct.success.add(_elem375); + _elem367 = iprot.readBool(); + struct.success.add(_elem367); } iprot.readListEnd(); } @@ -55311,9 +54117,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogRespon oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, struct.success.size())); - for (boolean _iter377 : struct.success) + for (boolean _iter369 : struct.success) { - oprot.writeBool(_iter377); + oprot.writeBool(_iter369); } oprot.writeListEnd(); } @@ -55352,9 +54158,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (boolean _iter378 : struct.success) + for (boolean _iter370 : struct.success) { - oprot.writeBool(_iter378); + oprot.writeBool(_iter370); } } } @@ -55369,13 +54175,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponse java.util.BitSet incoming = iprot.readBitSet(2); if 
(incoming.get(0)) { { - org.apache.thrift.protocol.TList _list379 = iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL); - struct.success = new java.util.ArrayList(_list379.size); - boolean _elem380; - for (int _i381 = 0; _i381 < _list379.size; ++_i381) + org.apache.thrift.protocol.TList _list371 = iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL); + struct.success = new java.util.ArrayList(_list371.size); + boolean _elem372; + for (int _i373 = 0; _i373 < _list371.size; ++_i373) { - _elem380 = iprot.readBool(); - struct.success.add(_elem380); + _elem372 = iprot.readBool(); + struct.success.add(_elem372); } } struct.setSuccessIsSet(true); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java index b4ad2b9612ce..7ae44c656f75 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class THRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionInfo"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java index 8c9f2ba14d22..0ab177748cc8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class THRegionLocation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionLocation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java index 86f8077cda47..ae7bfae18c10 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java @@ -12,7 +12,7 @@ * to the HBase master or a HBase region server. Also used to return * more general HBase error conditions. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TIOError extends org.apache.thrift.TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIOError"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java index 9b634c54f10c..3b51cf8e92e5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java @@ -11,7 +11,7 @@ * A TIllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TIllegalArgument extends org.apache.thrift.TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIllegalArgument"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java index 746f2199eedd..8794289d7b15 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java @@ -14,7 +14,7 @@ * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java index dd723fd73242..fd62e9f2153e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.KeepDeletedCells */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TKeepDeletedCells implements org.apache.thrift.TEnum { /** * Deleted Cells are not retained. 
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java index 49f7d5a4cd70..a2000dc9eb85 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.client.LogQueryFilter */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TLogQueryFilter implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TLogQueryFilter"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java index a353374d1004..f291ed912df5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TLogType implements org.apache.thrift.TEnum { SLOW_LOG(1), LARGE_LOG(2); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java index 6039ac39c02c..5fbd7d2c95fc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java @@ -10,7 +10,7 @@ /** * Atomic mutation for the specified row. It can be either Put or Delete. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TMutation extends org.apache.thrift.TUnion { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMutation"); private static final org.apache.thrift.protocol.TField PUT_FIELD_DESC = new org.apache.thrift.protocol.TField("put", org.apache.thrift.protocol.TType.STRUCT, (short)1); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java index ad79d1f7117f..ca91ba474e2c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.NamespaceDescriptor */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TNamespaceDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TNamespaceDescriptor"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java index c3d6cba7ad20..5611f6cefff4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.client.OnlineLogRecord */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-01-12") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TOnlineLogRecord implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOnlineLogRecord"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java index 7ca83ced9d51..6ca509e08cf4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TPermissionScope implements org.apache.thrift.TEnum { TABLE(0), NAMESPACE(1); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java index b458182fdc4c..2f4a7a995c83 100644 --- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java @@ -19,7 +19,7 @@ * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TPut implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPut"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java index 8af01cd1ed4c..b9bf47e7c19b 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TReadType implements org.apache.thrift.TEnum { DEFAULT(1), STREAM(2), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java index 757856e3e649..615fccdd2edd 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java @@ -10,7 +10,7 @@ /** * if no Result is found, row and columnValues will not be set. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TResult"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java index 6accf9d569ae..b70fcb4254f1 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java @@ -10,7 +10,7 @@ /** * A TRowMutations object is used to apply a number of Mutations to a single row. 
*/ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TRowMutations implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowMutations"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java index 6cfae7ed49e7..32c13cee2a94 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java @@ -11,7 +11,7 @@ * Any timestamps in the columns are ignored but the colFamTimeRangeMap included, use timeRange to select by timestamp. * Max versions defaults to 1. */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java index ec426e3296d3..f39adc98d9c5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TServerName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TServerName"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java index 7a3079a1a4c3..49ca2667186d 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.client.TableDescriptor */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TTableDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableDescriptor"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java index 80ae046e2ad3..5e19f0329b75 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java @@ -11,7 +11,7 @@ * Thrift wrapper around * org.apache.hadoop.hbase.TableName */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TTableName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableName"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java index 722b0f582396..f959ffd7f710 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java @@ -10,7 +10,7 @@ /** * Specify type of thrift server: thrift and thrift2 */ -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public enum TThriftServerType implements org.apache.thrift.TEnum { ONE(1), TWO(2); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java index fefa7cbf3e93..a6a341aaa5ba 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift2.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2024-05-08") public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTimeRange"); diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift index ed3fdf32b973..ec6504447c1d 100644 --- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift +++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift @@ -1006,24 +1006,6 @@ service THBaseService { 1: required TTableName tableName ) throws (1: TIOError io) - /** - * Use this api to check if the table has been created with the specified number of splitkeys - * which was used while creating the given table. Note : If this api is used after a table's - * region gets splitted, the api may return false. - * - * @return true if table is available, false if not - * - * @deprecated Since 2.2.0. Because the same method in Table interface has been deprecated - * since 2.0.0, we will remove it in 3.0.0 release. 
- * Use {@link #isTableAvailable(TTableName tableName)} instead - **/ - bool isTableAvailableWithSplit( - /** the tablename to check */ - 1: required TTableName tableName - /** keys to check if the table has been created with all split keys */ - 2: optional list splitKeys - ) throws (1: TIOError io) - /** * Add a column family to an existing table. Synchronous operation. **/ From 2dbbcdf84932387f4f192e0e9232a294c9b7d642 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 May 2024 22:06:11 +0800 Subject: [PATCH 366/514] HBASE-28578 Remove deprecated methods in HFileScanner (#5885) Signed-off-by: Xin Sun --- .../hadoop/hbase/io/HalfStoreFileReader.java | 14 ------------- .../hbase/io/hfile/HFileReaderImpl.java | 21 ------------------- .../hadoop/hbase/io/hfile/HFileScanner.java | 18 ---------------- .../hadoop/hbase/io/hfile/TestReseekTo.java | 5 +++-- 4 files changed, 3 insertions(+), 55 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 2119a3e7cbef..f4cccfd03b04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -105,13 +105,6 @@ public Cell getKey() { return delegate.getKey(); } - @Override - public String getKeyString() { - if (atEnd) return null; - - return delegate.getKeyString(); - } - @Override public ByteBuffer getValue() { if (atEnd) return null; @@ -119,13 +112,6 @@ public ByteBuffer getValue() { return delegate.getValue(); } - @Override - public String getValueString() { - if (atEnd) return null; - - return delegate.getValueString(); - } - @Override public Cell getCell() { if (atEnd) return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 9c9b38c4906b..e0585c6edaa2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1040,16 +1040,6 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { } } - @Override - public String getKeyString() { - return CellUtil.toString(getKey(), false); - } - - @Override - public String getValueString() { - return ByteBufferUtils.toStringBinary(getValue()); - } - public int compareKey(CellComparator comparator, Cell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); @@ -1571,17 +1561,6 @@ public Cell getCell() { return seeker.getCell(); } - @Override - public String getKeyString() { - return CellUtil.toString(getKey(), false); - } - - @Override - public String getValueString() { - ByteBuffer valueBuffer = getValue(); - return ByteBufferUtils.toStringBinary(valueBuffer); - } - private void assertValidSeek() { if (this.curBlock == null) { throw new NotSeekedException(reader.getPath()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 0393d3b788a7..b0022788c38f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -106,24 +106,6 @@ 
public interface HFileScanner extends Shipper, Closeable { /** Returns Instance of {@link org.apache.hadoop.hbase.Cell}. */ Cell getCell(); - /** - * Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You - * must call {@link #seekTo(Cell)} before this method. - * @return key as a string - * @deprecated Since hbase-2.0.0 - */ - @Deprecated - String getKeyString(); - - /** - * Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You - * must call {@link #seekTo(Cell)} before this method. - * @return value as a string - * @deprecated Since hbase-2.0.0 - */ - @Deprecated - String getValueString(); - /** Returns Reader that underlies this Scanner instance. */ HFile.Reader getReader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index c757798a3940..b40f39777d8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; import org.junit.Test; @@ -118,7 +119,7 @@ private void testReseekToInternals(TagUsage tagUsage) throws IOException { long start = System.nanoTime(); scanner.seekTo(new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"), Bytes.toBytes(value))); - assertEquals(value, scanner.getValueString()); + assertEquals(value, ByteBufferUtils.toStringBinary(scanner.getValue())); } scanner.seekTo(); @@ -128,7 +129,7 @@ private void testReseekToInternals(TagUsage tagUsage) throws IOException { long start = System.nanoTime(); scanner.reseekTo(new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"), Bytes.toBytes(value))); - assertEquals("i is " + i, value, scanner.getValueString()); + assertEquals("i is " + i, value, ByteBufferUtils.toStringBinary(scanner.getValue())); } reader.close(); From b260199882ba9148e4b1fe6ae201259defcdd545 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 May 2024 22:07:40 +0800 Subject: [PATCH 367/514] HBASE-28579 Hide HFileScanner related methods in StoreFileReader (#5889) Signed-off-by: Xin Sun --- .../hadoop/hbase/io/HalfStoreFileReader.java | 4 +- .../hbase/regionserver/StoreFileReader.java | 26 +-- .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 99 ++++----- .../hbase/io/TestHalfStoreFileReader.java | 42 ++-- .../hbase/regionserver/TestCompaction.java | 14 +- .../regionserver/TestFSErrorsExposed.java | 73 ++++--- .../hbase/regionserver/TestHStoreFile.java | 194 +++++++++--------- .../regionserver/TestMajorCompaction.java | 36 ++-- .../regionserver/TestRegionReplicas.java | 25 ++- 9 files changed, 262 insertions(+), 251 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index f4cccfd03b04..0989f73df0a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -92,7 +92,7 @@ protected boolean isTop() { } @Override - public HFileScanner getScanner(final 
boolean cacheBlocks, final boolean pread, + protected HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, final boolean isCompaction) { final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction); return new HFileScanner() { @@ -283,7 +283,7 @@ public Optional getLastKey() { return super.getLastKey(); } // Get a scanner that caches the block and that uses pread. - HFileScanner scanner = getScanner(true, true); + HFileScanner scanner = getScanner(true, true, false); try { if (scanner.seekBefore(this.splitCell)) { return Optional.ofNullable(scanner.getKey()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 09c379227bda..4f872d7084e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -183,31 +183,9 @@ void readCompleted() { } /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level - * concepts. - * @param cacheBlocks should we cache the blocks? - * @param pread use pread (for concurrent small readers) - * @return the underlying HFileScanner - * @see HBASE-15296 + * Will be overridden in HalfStoreFileReader */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { - return getScanner(cacheBlocks, pread, false); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level - * concepts. should we cache the blocks? use pread (for concurrent small readers) is - * scanner being used for compaction? 
- * @return the underlying HFileScanner - * @see HBASE-15296 - */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { + protected HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { return reader.getScanner(conf, cacheBlocks, pread, isCompaction); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 9b4e1aea9066..24578417ef34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.AsyncAdmin; @@ -74,7 +75,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.HFileLink; -import org.apache.hadoop.hbase.io.HalfStoreFileReader; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -83,11 +83,12 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileInfo; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; +import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.security.UserProvider; @@ -757,6 +758,41 @@ static void splitStoreFile(AsyncTableRegionLocator loc, Configuration conf, Path copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc, loc); } + private static StoreFileWriter initStoreFileWriter(Configuration conf, Cell cell, + HFileContext hFileContext, CacheConfig cacheConf, BloomType bloomFilterType, FileSystem fs, + Path outFile, AsyncTableRegionLocator loc) throws IOException { + if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { + byte[] rowKey = CellUtil.cloneRow(cell); + HRegionLocation hRegionLocation = FutureUtils.get(loc.getRegionLocation(rowKey)); + InetSocketAddress[] favoredNodes = null; + if (null == hRegionLocation) { + LOG.warn("Failed get region location for rowkey {} , Using writer without favoured nodes.", + Bytes.toString(rowKey)); + return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) + .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + } else { + LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey)); + InetSocketAddress initialIsa = + new InetSocketAddress(hRegionLocation.getHostname(), hRegionLocation.getPort()); + if (initialIsa.isUnresolved()) { + LOG.warn("Failed get location for region {} , Using writer without favoured nodes.", + 
hRegionLocation); + return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) + .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + } else { + LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString()); + favoredNodes = new InetSocketAddress[] { initialIsa }; + return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) + .withBloomType(bloomFilterType).withFileContext(hFileContext) + .withFavoredNodes(favoredNodes).build(); + } + } + } else { + return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) + .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + } + } + /** * Copy half of an HFile into a new HFile with favored nodes. */ @@ -765,14 +801,14 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, throws IOException { FileSystem fs = inFile.getFileSystem(conf); CacheConfig cacheConf = CacheConfig.DISABLED; - HalfStoreFileReader halfReader = null; + StoreFileReader halfReader = null; StoreFileWriter halfWriter = null; try { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build(); StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, fs.getFileStatus(inFile), reference); storeFileInfo.initHFileInfo(context); - halfReader = (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf); + halfReader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(halfReader.getHFileReader()); Map fileInfo = halfReader.loadFileInfo(); @@ -785,51 +821,22 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) .withCreateTime(EnvironmentEdgeManager.currentTime()).build(); - HFileScanner scanner = halfReader.getScanner(false, false, false); - scanner.seekTo(); - do { - final Cell cell = scanner.getCell(); - if (null != halfWriter) { - halfWriter.append(cell); - } else { - - // init halfwriter - if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { - byte[] rowKey = CellUtil.cloneRow(cell); - HRegionLocation hRegionLocation = FutureUtils.get(loc.getRegionLocation(rowKey)); - InetSocketAddress[] favoredNodes = null; - if (null == hRegionLocation) { - LOG.warn( - "Failed get region location for rowkey {} , Using writer without favoured nodes.", - Bytes.toString(rowKey)); - halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); - } else { - LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey)); - InetSocketAddress initialIsa = - new InetSocketAddress(hRegionLocation.getHostname(), hRegionLocation.getPort()); - if (initialIsa.isUnresolved()) { - LOG.warn("Failed get location for region {} , Using writer without favoured nodes.", - hRegionLocation); - halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); - } else { - LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString()); - favoredNodes = new InetSocketAddress[] { initialIsa }; - halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - .withBloomType(bloomFilterType).withFileContext(hFileContext) - .withFavoredNodes(favoredNodes).build(); - } - } - } else { - halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - 
.withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + try (StoreFileScanner scanner = + halfReader.getStoreFileScanner(false, false, false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + for (;;) { + Cell cell = scanner.next(); + if (cell == null) { + break; + } + if (halfWriter == null) { + // init halfwriter + halfWriter = initStoreFileWriter(conf, cell, hFileContext, cacheConf, bloomFilterType, + fs, outFile, loc); } halfWriter.append(cell); } - - } while (scanner.next()); - + } for (Map.Entry entry : fileInfo.entrySet()) { if (shouldCopyHFileMetaKey(entry.getKey())) { halfWriter.appendFileInfo(entry.getKey(), entry.getValue()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 7dd4cbe44f93..0a41159e3aaa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -124,21 +124,22 @@ private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, Cach (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(halfreader.getHFileReader()); halfreader.loadFileInfo(); - final HFileScanner scanner = halfreader.getScanner(false, false); - - scanner.seekTo(); - Cell curr; - do { - curr = scanner.getCell(); - KeyValue reseekKv = getLastOnCol(curr); - int ret = scanner.reseekTo(reseekKv); - assertTrue("reseek to returned: " + ret, ret > 0); - // System.out.println(curr + ": " + ret); - } while (scanner.next()); - - int ret = scanner.reseekTo(getLastOnCol(curr)); - // System.out.println("Last reseek: " + ret); - assertTrue(ret > 0); + try (HFileScanner scanner = halfreader.getScanner(false, false, false)) { + + scanner.seekTo(); + Cell curr; + do { + curr = scanner.getCell(); + KeyValue reseekKv = getLastOnCol(curr); + int ret = scanner.reseekTo(reseekKv); + assertTrue("reseek to returned: " + ret, ret > 0); + // System.out.println(curr + ": " + ret); + } while (scanner.next()); + + int ret = scanner.reseekTo(getLastOnCol(curr)); + // System.out.println("Last reseek: " + ret); + assertTrue(ret > 0); + } halfreader.close(true); } @@ -222,9 +223,14 @@ private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell se (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConfig); storeFileInfo.getHFileInfo().initMetaAndIndex(halfreader.getHFileReader()); halfreader.loadFileInfo(); - final HFileScanner scanner = halfreader.getScanner(false, false); - scanner.seekBefore(seekBefore); - return scanner.getCell(); + try (HFileScanner scanner = halfreader.getScanner(false, false, false)) { + scanner.seekBefore(seekBefore); + if (scanner.getCell() != null) { + return KeyValueUtil.copyToNewKeyValue(scanner.getCell()); + } else { + return null; + } + } } private KeyValue getLastOnCol(Cell curr) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index c0bc72079cb7..9b43ab32c2c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.HTestConst; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -62,7 +63,6 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; @@ -316,13 +316,13 @@ public Object answer(InvocationOnMock invocation) throws Throwable { private int count() throws IOException { int count = 0; for (HStoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); - if (!scanner.seekTo()) { - continue; + f.initReader(); + try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + while (scanner.next() != null) { + count++; + } } - do { - count++; - } while (scanner.next()); } return count; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 55320e94a9f9..f4fff4f5cbe4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -99,26 +99,26 @@ public void testHFileScannerThrowsErrors() throws IOException { BloomType.NONE, true); sf.initReader(); StoreFileReader reader = sf.getReader(); - HFileScanner scanner = reader.getScanner(false, true); + try (HFileScanner scanner = reader.getScanner(false, true, false)) { + FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); + assertNotNull(inStream); - FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); - assertNotNull(inStream); + scanner.seekTo(); + // Do at least one successful read + assertTrue(scanner.next()); - scanner.seekTo(); - // Do at least one successful read - assertTrue(scanner.next()); + faultyfs.startFaults(); - faultyfs.startFaults(); - - try { - int scanned = 0; - while (scanner.next()) { - scanned++; + try { + int scanned = 0; + while (scanner.next()) { + scanned++; + } + fail("Scanner didn't throw after faults injected"); + } catch (IOException ioe) { + LOG.info("Got expected exception", ioe); + assertTrue(ioe.getMessage().contains("Fault")); } - fail("Scanner didn't throw after faults injected"); - } catch (IOException ioe) { - LOG.info("Got expected exception", ioe); - assertTrue(ioe.getMessage().contains("Fault")); } reader.close(true); // end of test so evictOnClose } @@ -147,27 +147,32 @@ public void testStoreFileScannerThrowsErrors() throws IOException { Collections.singletonList(sf), false, true, false, false, // 0 is passed as readpoint because this test operates on HStoreFile directly 0); - KeyValueScanner scanner = scanners.get(0); + try { + KeyValueScanner scanner = scanners.get(0); - FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); - assertNotNull(inStream); + FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); + assertNotNull(inStream); - scanner.seek(KeyValue.LOWESTKEY); - // Do at least one 
successful read - assertNotNull(scanner.next()); - faultyfs.startFaults(); + scanner.seek(KeyValue.LOWESTKEY); + // Do at least one successful read + assertNotNull(scanner.next()); + faultyfs.startFaults(); - try { - int scanned = 0; - while (scanner.next() != null) { - scanned++; + try { + int scanned = 0; + while (scanner.next() != null) { + scanned++; + } + fail("Scanner didn't throw after faults injected"); + } catch (IOException ioe) { + LOG.info("Got expected exception", ioe); + assertTrue(ioe.getMessage().contains("Could not iterate")); + } + } finally { + for (StoreFileScanner scanner : scanners) { + scanner.close(); } - fail("Scanner didn't throw after faults injected"); - } catch (IOException ioe) { - LOG.info("Got expected exception", ioe); - assertTrue(ioe.getMessage().contains("Could not iterate")); } - scanner.close(); } /** @@ -202,13 +207,13 @@ public void testFullSystemBubblesFSErrors() throws Exception { // Load some data util.loadTable(table, fam, false); util.flush(); - util.countRows(table); + HBaseTestingUtil.countRows(table); // Kill the DFS cluster util.getDFSCluster().shutdownDataNodes(); try { - util.countRows(table); + HBaseTestingUtil.countRows(table); fail("Did not fail to count after removing data"); } catch (Exception e) { LOG.info("Got expected error", e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index aa7fb53566df..824c195fd0f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -245,19 +245,20 @@ public void testReference() throws IOException { refHsf.initReader(); // Now confirm that I can read from the reference and that it only gets // keys from top half of the file. 
- HFileScanner s = refHsf.getReader().getScanner(false, false); - Cell kv = null; - for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) { - ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey()); - kv = KeyValueUtil.createKeyValueFromKey(bb); - if (first) { - assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), midRow, 0, - midRow.length)); - first = false; + try (HFileScanner s = refHsf.getReader().getScanner(false, false, false)) { + Cell kv = null; + for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) { + ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey()); + kv = KeyValueUtil.createKeyValueFromKey(bb); + if (first) { + assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), midRow, 0, + midRow.length)); + first = false; + } } + assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), finalRow, 0, + finalRow.length)); } - assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), finalRow, 0, - finalRow.length)); } @Test @@ -333,11 +334,12 @@ public void testHFileLink() throws IOException { hsf.initReader(); // Now confirm that I can read from the link - int count = 1; - HFileScanner s = hsf.getReader().getScanner(false, false); - s.seekTo(); - while (s.next()) { - count++; + int count = 0; + try (StoreFileScanner scanner = hsf.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + while (scanner.next() != null) { + count++; + } } assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count); } @@ -395,26 +397,25 @@ public void testReferenceToHFileLink() throws IOException { hsfA.initReader(); // Now confirm that I can read from the ref to link - int count = 1; - HFileScanner s = hsfA.getReader().getScanner(false, false); - s.seekTo(); - while (s.next()) { - count++; + int count = 0; + try (StoreFileScanner scanner = hsfA.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + while (scanner.next() != null) { + count++; + } + assertTrue(count > 0); // read some rows here } - assertTrue(count > 0); // read some rows here // Try to open store file from link HStoreFile hsfB = new HStoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE, true); hsfB.initReader(); // Now confirm that I can read from the ref to link - HFileScanner sB = hsfB.getReader().getScanner(false, false); - sB.seekTo(); - - // count++ as seekTo() will advance the scanner - count++; - while (sB.next()) { - count++; + try (StoreFileScanner scanner = hsfB.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + while (scanner.next() != null) { + count++; + } } // read the rest of the rows @@ -454,39 +455,41 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f // Now test reading from the top. 
boolean first = true; ByteBuffer key = null; - HFileScanner topScanner = top.getScanner(false, false); - while ( - (!topScanner.isSeeked() && topScanner.seekTo()) - || (topScanner.isSeeked() && topScanner.next()) - ) { - key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey()); - - if ( - (PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(), - key.arrayOffset(), key.limit())) > 0 + try (HFileScanner topScanner = top.getScanner(false, false, false)) { + while ( + (!topScanner.isSeeked() && topScanner.seekTo()) + || (topScanner.isSeeked() && topScanner.next()) ) { - fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey); - } - if (first) { - first = false; - LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key))); + key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey()); + + if ( + (PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(), + key.arrayOffset(), key.limit())) > 0 + ) { + fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey); + } + if (first) { + first = false; + LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key))); + } } } LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key))); first = true; - HFileScanner bottomScanner = bottom.getScanner(false, false); - while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) { - previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); - key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); - if (first) { - first = false; - LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous))); + try (HFileScanner bottomScanner = bottom.getScanner(false, false, false)) { + while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) { + previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); + key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); + if (first) { + first = false; + LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous))); + } + assertTrue(key.compareTo(bbMidkeyBytes) < 0); + } + if (previous != null) { + LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous))); } - assertTrue(key.compareTo(bbMidkeyBytes) < 0); - } - if (previous != null) { - LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous))); } // Remove references. regionFs.cleanupDaughterRegion(topHri); @@ -507,29 +510,31 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f top = topF.getReader(); // Now read from the top. 
first = true; - topScanner = top.getScanner(false, false); - KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue(); - while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) { - key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey()); - keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit()); - assertTrue(PrivateCellUtil.compare(topScanner.getReader().getComparator(), keyOnlyKV, - badmidkey, 0, badmidkey.length) >= 0); - if (first) { - first = false; - KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); - LOG.info("First top when key < bottom: " + keyKV); - String tmp = - Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); - for (int i = 0; i < tmp.length(); i++) { - assertTrue(tmp.charAt(i) == 'a'); + try (HFileScanner topScanner = top.getScanner(false, false, false)) { + KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue(); + while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) { + key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey()); + keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit()); + assertTrue(PrivateCellUtil.compare(topScanner.getReader().getComparator(), keyOnlyKV, + badmidkey, 0, badmidkey.length) >= 0); + if (first) { + first = false; + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); + LOG.info("First top when key < bottom: " + keyKV); + String tmp = + Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); + for (int i = 0; i < tmp.length(); i++) { + assertTrue(tmp.charAt(i) == 'a'); + } } } - } - KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); - LOG.info("Last top when key < bottom: " + keyKV); - String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); - for (int i = 0; i < tmp.length(); i++) { - assertTrue(tmp.charAt(i) == 'z'); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); + LOG.info("Last top when key < bottom: " + keyKV); + String tmp = + Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); + for (int i = 0; i < tmp.length(); i++) { + assertTrue(tmp.charAt(i) == 'z'); + } } // Remove references. 
regionFs.cleanupDaughterRegion(topHri); @@ -545,25 +550,28 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f bottomF.initReader(); bottom = bottomF.getReader(); first = true; - bottomScanner = bottom.getScanner(false, false); - while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) { - key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); - if (first) { - first = false; - keyKV = KeyValueUtil.createKeyValueFromKey(key); - LOG.info("First bottom when key > top: " + keyKV); - tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); - for (int i = 0; i < tmp.length(); i++) { - assertTrue(tmp.charAt(i) == 'a'); + try (HFileScanner bottomScanner = bottom.getScanner(false, false, false)) { + while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) { + key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey()); + if (first) { + first = false; + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); + LOG.info("First bottom when key > top: " + keyKV); + String tmp = + Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); + for (int i = 0; i < tmp.length(); i++) { + assertTrue(tmp.charAt(i) == 'a'); + } } } - } - keyKV = KeyValueUtil.createKeyValueFromKey(key); - LOG.info("Last bottom when key > top: " + keyKV); - for (int i = 0; i < tmp.length(); i++) { - assertTrue( - Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()).charAt(i) - == 'z'); + KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); + LOG.info("Last bottom when key > top: " + keyKV); + String tmp = + Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); + for (int i = 0; i < tmp.length(); i++) { + assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()) + .charAt(i) == 'z'); + } } } finally { if (top != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 4c96dc221abf..9ec1ed809316 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTestConst; import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -331,16 +331,21 @@ private void verifyCounts(int countRow1, int countRow2) throws Exception { int count1 = 0; int count2 = 0; for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); - scanner.seekTo(); - do { - byte[] 
row = CellUtil.cloneRow(scanner.getCell()); - if (Bytes.equals(row, STARTROW)) { - count1++; - } else if (Bytes.equals(row, secondRowBytes)) { - count2++; + try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + for (Cell cell;;) { + cell = scanner.next(); + if (cell == null) { + break; + } + byte[] row = CellUtil.cloneRow(cell); + if (Bytes.equals(row, STARTROW)) { + count1++; + } else if (Bytes.equals(row, secondRowBytes)) { + count2++; + } } - } while (scanner.next()); + } } assertEquals(countRow1, count1); assertEquals(countRow2, count2); @@ -349,13 +354,12 @@ private void verifyCounts(int countRow1, int countRow2) throws Exception { private int count() throws IOException { int count = 0; for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); - if (!scanner.seekTo()) { - continue; + try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + while (scanner.next() != null) { + count++; + } } - do { - count++; - } while (scanner.next()); } return count; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index f7c65b02d8ba..68c6b6434c4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TestMetaTableAccessor; import org.apache.hadoop.hbase.client.Consistency; @@ -43,7 +44,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -501,16 +501,19 @@ public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception { // Our file does not exist anymore. was moved by the compaction above. 
LOG.debug(Boolean.toString(getRS().getFileSystem().exists(sf.getPath()))); Assert.assertFalse(getRS().getFileSystem().exists(sf.getPath())); - - HFileScanner scanner = sf.getReader().getScanner(false, false); - scanner.seekTo(); - do { - keys++; - - Cell cell = scanner.getCell(); - sum += Integer - .parseInt(Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); - } while (scanner.next()); + sf.initReader(); + try (StoreFileScanner scanner = sf.getPreadScanner(false, Long.MAX_VALUE, 0, false)) { + scanner.seek(KeyValue.LOWESTKEY); + for (Cell cell;;) { + cell = scanner.next(); + if (cell == null) { + break; + } + keys++; + sum += Integer.parseInt( + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + } + } } Assert.assertEquals(3000, keys); Assert.assertEquals(4498500, sum); From 6c84d3960ff143f9bdd11b8143a6bab505c4e165 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 May 2024 23:35:14 +0800 Subject: [PATCH 368/514] HBASE-26048 [JDK17] Replace the usage of deprecated API ThreadGroup.destroy() (#5913) Signed-off-by: Xin Sun --- .../hadoop/hbase/procedure2/ProcedureExecutor.java | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 8a5062be7918..0edfac8e8840 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -735,14 +735,11 @@ public void join() { Thread.currentThread().interrupt(); } - // Destroy the Thread Group for the executors - // TODO: Fix. #join is not place to destroy resources. - try { - threadGroup.destroy(); - } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, e); - // This dumps list of threads on STDOUT. - this.threadGroup.list(); + // log the still active threads, ThreadGroup.destroy is deprecated in JDK17 and it is not + // necessary for us to must destroy it here, so we just do a check and log + if (threadGroup.activeCount() > 0) { + LOG.error("There are still active thread in group {}, see STDOUT", threadGroup); + threadGroup.list(); } // reset the in-memory state for testing From b4c271253a9e328d9b6fdf207db183980c4bc4bf Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 17 May 2024 18:31:19 +0200 Subject: [PATCH 369/514] HBASE-28568 Incremental backup set does not correctly shrink (addendum) (#5917) Import the correct shaded Guava and run spotless:apply. 
Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java | 1 - .../java/org/apache/hadoop/hbase/backup/TestBackupDelete.java | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index de8ca6b7497c..f500581e9d85 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java index ef40bc63d086..785859c52805 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; import org.junit.ClassRule; @@ -41,6 +40,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @Category(LargeTests.class) public class TestBackupDelete extends TestBackupBase { From d4b0e18a5e8fbf7261345d2015cd9223d30451db Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 17 May 2024 13:42:42 -0400 Subject: [PATCH 370/514] HBASE-28236 Add 2.6.0 to downloads page (#5919) Signed-off-by: Duo Zhang --- src/site/xdoc/downloads.xml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 039c05c01f38..132dc4927fb6 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -68,6 +68,31 @@ under the License. Feature freeze, passed a 10B ITBLL run, use with caution + + + 2.6.0 + + + 2024/05/17 + + + 2.5.0 vs 2.6.0 + + + Changes + + + Release Notes + + + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) + hadoop3-bin (sha512 asc)
    + hadoop3-client-bin (sha512 asc) + + + 2.5.8 From a3ff01d890b6d220f9056e508aeb1956d2768ff3 Mon Sep 17 00:00:00 2001 From: csringhofer Date: Fri, 17 May 2024 20:55:53 +0200 Subject: [PATCH 371/514] HBASE-28595: check seq id of scan RPCs for closed scanners (#5910) Signed-off-by: Duo Zhang Signed-off-by: Tak Lon (Stephen) Wu --- .../hbase/regionserver/RSRpcServices.java | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index babaa56170ad..66c97fb9401f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -303,10 +303,11 @@ public class RSRpcServices extends HBaseRpcServicesBase private ScannerIdGenerator scannerIdGenerator; private final ConcurrentMap scanners = new ConcurrentHashMap<>(); - // Hold the name of a closed scanner for a while. This is used to keep compatible for old clients - // which may send next or close request to a region scanner which has already been exhausted. The - // entries will be removed automatically after scannerLeaseTimeoutPeriod. - private final Cache closedScanners; + // Hold the name and last sequence number of a closed scanner for a while. This is used + // to keep compatible for old clients which may send next or close request to a region + // scanner which has already been exhausted. The entries will be removed automatically + // after scannerLeaseTimeoutPeriod. + private final Cache closedScanners; /** * The lease timeout period for client scanners (milliseconds). */ @@ -3083,8 +3084,18 @@ private RegionScannerHolder getRegionScanner(ScanRequest request) throws IOExcep RegionScannerHolder rsh = this.scanners.get(scannerName); if (rsh == null) { // just ignore the next or close request if scanner does not exists. - if (closedScanners.getIfPresent(scannerName) != null) { - throw SCANNER_ALREADY_CLOSED; + Long lastCallSeq = closedScanners.getIfPresent(scannerName); + if (lastCallSeq != null) { + // Check the sequence number to catch if the last call was incorrectly retried. + // The only allowed scenario is when the scanner is exhausted and one more scan + // request arrives - in this case returning 0 rows is correct. + if (request.hasNextCallSeq() && request.getNextCallSeq() != lastCallSeq + 1) { + throw new OutOfOrderScannerNextException("Expected nextCallSeq for closed request: " + + (lastCallSeq + 1) + " But the nextCallSeq got from client: " + + request.getNextCallSeq() + "; request=" + TextFormat.shortDebugString(request)); + } else { + throw SCANNER_ALREADY_CLOSED; + } } else { LOG.warn("Client tried to access missing scanner " + scannerName); throw new UnknownScannerException( @@ -3690,7 +3701,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) { scannerClosed = true; - closeScanner(region, scanner, scannerName, rpcCall); + closeScanner(region, scanner, scannerName, rpcCall, false); } // There's no point returning to a timed out client. 
Throwing ensures scanner is closed @@ -3706,7 +3717,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque // The scanner state might be left in a dirty state, so we will tell the Client to // fail this RPC and close the scanner while opening up another one from the start of // row that the client has last seen. - closeScanner(region, scanner, scannerName, rpcCall); + closeScanner(region, scanner, scannerName, rpcCall, true); // If it is a DoNotRetryIOException already, throw as it is. Unfortunately, DNRIOE is // used in two different semantics. @@ -3770,7 +3781,7 @@ private void runShippedCallback(RegionScannerHolder rsh) throws ServiceException } private void closeScanner(HRegion region, RegionScanner scanner, String scannerName, - RpcCallContext context) throws IOException { + RpcCallContext context, boolean isError) throws IOException { if (region.getCoprocessorHost() != null) { if (region.getCoprocessorHost().preScannerClose(scanner)) { // bypass the actual close. @@ -3787,7 +3798,9 @@ private void closeScanner(HRegion region, RegionScanner scanner, String scannerN if (region.getCoprocessorHost() != null) { region.getCoprocessorHost().postScannerClose(scanner); } - closedScanners.put(scannerName, scannerName); + if (!isError) { + closedScanners.put(scannerName, rsh.getNextCallSeq()); + } } } From 6b7aaed600988f9e9858eb7a9172f9af03b61b5c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 18 May 2024 16:55:07 +0800 Subject: [PATCH 372/514] HBASE-28604 Fix the error message in ReservoirSample's constructor (#5920) Signed-off-by: Nick Dimiduk --- .../org/apache/hadoop/hbase/util/ReservoirSample.java | 2 +- .../org/apache/hadoop/hbase/util/TestReservoirSample.java | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReservoirSample.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReservoirSample.java index f50b81154e1c..aa7a84b1ac2b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReservoirSample.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReservoirSample.java @@ -41,7 +41,7 @@ public class ReservoirSample { private int n; public ReservoirSample(int k) { - Preconditions.checkArgument(k > 0, "negative sampling number(%d) is not allowed"); + Preconditions.checkArgument(k > 0, "negative sampling number(%s) is not allowed", k); r = new ArrayList<>(k); this.k = k; } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestReservoirSample.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestReservoirSample.java index 6192edfa036a..d7ea0c5ac0cb 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestReservoirSample.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestReservoirSample.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.util; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import java.util.stream.IntStream; @@ -89,4 +90,11 @@ public void testStream() { assertTrue(containsOne > round / 10 * 0.95); assertTrue(containsOne < round / 10 * 1.05); } + + @Test + public void testNegativeSamplingNumber() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> new ReservoirSample(-1)); + assertEquals("negative sampling number(-1) is not allowed", e.getMessage()); + } } From 6b3f5ae1fc1f6e7c7f3caa64576ccc0b400b68b2 Mon Sep 17 00:00:00 2001 From: 
mrzhao Date: Sat, 18 May 2024 17:12:57 +0800 Subject: [PATCH 373/514] HBASE-28536 Fix `Disable Stripe Compaction` run error in document (#5836) Co-authored-by: mrzhao Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/architecture.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index 3ff09fac63f2..4aead5e3e841 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -2579,7 +2579,7 @@ create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' + [source,sql] ---- -alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'rg.apache.hadoop.hbase.regionserver.DefaultStoreEngine'} +alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DefaultStoreEngine'} ---- . Enable the table. From dba7dccb65dfaf5bd88400f12107f28b79aea4a2 Mon Sep 17 00:00:00 2001 From: Youngju KIM Date: Mon, 20 May 2024 10:16:55 +0900 Subject: [PATCH 374/514] =?UTF-8?q?HBASE-28599=C2=A0RowTooBigException=20i?= =?UTF-8?q?s=20thrown=20when=20duplicate=20increment=20RPC=20call=20is=20a?= =?UTF-8?q?ttempted=20(#5927)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The root cause is analyzed by Robin Infant A(robiee17) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index c55090d3a756..fdc50bc69476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4036,7 +4036,7 @@ private static Get toGet(final Mutation mutation) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; Get get = new Get(mutation.getRow()); CellScanner cellScanner = mutation.cellScanner(); - while (!cellScanner.advance()) { + while (cellScanner.advance()) { Cell cell = cellScanner.current(); get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell)); } From 2dc7e1523b0ec24cd058f48703fe1682cd4a3786 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 20 May 2024 09:43:59 +0800 Subject: [PATCH 375/514] HBASE-28547 Support specifying connection configuration through queries of the connection uri (#5853) Signed-off-by: Nick Dimiduk --- .../hbase/client/ConnectionFactory.java | 23 ++++-- .../TestConnectionFactoryApplyURIQueries.java | 80 +++++++++++++++++++ .../org/apache/hadoop/hbase/util/Strings.java | 41 ++++++++++ .../apache/hadoop/hbase/util/TestStrings.java | 64 ++++++++++----- 4 files changed, 182 insertions(+), 26 deletions(-) create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index b9b156bf36d3..144a790c406d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.trace.TraceUtil; import 
org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -569,10 +570,16 @@ public static CompletableFuture createAsyncConnection(URI conne Configuration conf, final User user, Map connectionAttributes) { return TraceUtil.tracedFuture(() -> { ConnectionRegistry registry; + Configuration appliedConf; try { - registry = connectionUri != null - ? ConnectionRegistryFactory.create(connectionUri, conf, user) - : ConnectionRegistryFactory.create(conf, user); + if (connectionUri != null) { + appliedConf = new Configuration(conf); + Strings.applyURIQueriesToConf(connectionUri, appliedConf); + registry = ConnectionRegistryFactory.create(connectionUri, appliedConf, user); + } else { + appliedConf = conf; + registry = ConnectionRegistryFactory.create(appliedConf, user); + } } catch (Exception e) { return FutureUtils.failedFuture(e); } @@ -588,12 +595,12 @@ public static CompletableFuture createAsyncConnection(URI conne future.completeExceptionally(new IOException("clusterid came back null")); return; } - Class clazz = conf.getClass(HBASE_CLIENT_ASYNC_CONNECTION_IMPL, - AsyncConnectionImpl.class, AsyncConnection.class); + Class clazz = appliedConf.getClass( + HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class); try { - future.complete( - user.runAs((PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, null, user, connectionAttributes))); + future.complete(user.runAs((PrivilegedExceptionAction< + ? extends AsyncConnection>) () -> ReflectionUtils.newInstance(clazz, appliedConf, + registry, clusterId, null, user, connectionAttributes))); } catch (Exception e) { registry.close(); future.completeExceptionally(e); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java new file mode 100644 index 000000000000..806c5edeb7fc --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.ArgumentCaptor; +import org.mockito.MockedStatic; + +@Category({ ClientTests.class, SmallTests.class }) +public class TestConnectionFactoryApplyURIQueries { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestConnectionFactoryApplyURIQueries.class); + + private Configuration conf; + + private MockedStatic mockedConnectionRegistryFactory; + + private ConnectionRegistry registry; + + @Before + public void setUp() { + conf = HBaseConfiguration.create(); + mockedConnectionRegistryFactory = mockStatic(ConnectionRegistryFactory.class); + registry = mock(ConnectionRegistry.class); + mockedConnectionRegistryFactory + .when(() -> ConnectionRegistryFactory.create(any(), any(), any())).thenReturn(registry); + when(registry.getClusterId()).thenReturn(CompletableFuture.completedFuture("cluster")); + } + + @After + public void tearDown() { + mockedConnectionRegistryFactory.closeOnDemand(); + } + + @Test + public void testApplyURIQueries() throws Exception { + ConnectionFactory.createConnection(new URI("hbase+rpc://server:16010?a=1&b=2&c"), conf); + ArgumentCaptor captor = ArgumentCaptor.forClass(Configuration.class); + mockedConnectionRegistryFactory + .verify(() -> ConnectionRegistryFactory.create(any(), captor.capture(), any())); + Configuration c = captor.getValue(); + assertEquals("1", c.get("a")); + assertEquals("2", c.get("b")); + assertEquals("", c.get("c")); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java index 3baab9cca211..b5d760bf0d7e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Strings.java @@ -17,7 +17,15 @@ */ package org.apache.hadoop.hbase.util; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; @@ -94,4 +102,37 @@ public static String padFront(String input, char padding, int length) { int numPaddingCharacters = length - input.length(); return StringUtils.repeat(padding, numPaddingCharacters) + input; } + + /** + * Parse the query string of an URI to a key value map. If a single key occurred multiple times, + * only the first one will take effect. 
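For orientation, a minimal sketch of how this new query-string support is exercised from client code. The `hbase+rpc://server:16010` scheme and authority mirror the accompanying unit test; the `hbase.client.retries.number` key and its value are illustrative assumptions, not taken from this patch.

```java
import java.net.URI;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Strings;

public class ConnectionUriQueryExample {
  public static void main(String[] args) throws Exception {
    // Keys in the query string become Configuration overrides; if a key repeats,
    // only the first occurrence takes effect (see parseURIQueries above).
    URI uri = new URI("hbase+rpc://server:16010?hbase.client.retries.number=5");

    Map<String, String> queries = Strings.parseURIQueries(uri);
    System.out.println(queries); // {hbase.client.retries.number=5}

    // ConnectionFactory copies the configuration and applies the same queries
    // before creating the connection registry, so the overrides reach the
    // connection implementation as well.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(uri, conf)) {
      // use the connection as usual
    }
  }
}
```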
+ */ + public static Map parseURIQueries(URI uri) { + if (StringUtils.isBlank(uri.getRawQuery())) { + return Collections.emptyMap(); + } + return Splitter.on('&').trimResults().splitToStream(uri.getRawQuery()).map(kv -> { + int idx = kv.indexOf('='); + try { + if (idx > 0) { + return Pair.newPair( + URLDecoder.decode(kv.substring(0, idx), StandardCharsets.UTF_8.name()), + URLDecoder.decode(kv.substring(idx + 1), StandardCharsets.UTF_8.name())); + } else { + return Pair.newPair(URLDecoder.decode(kv, StandardCharsets.UTF_8.name()), ""); + } + } catch (UnsupportedEncodingException e) { + // should not happen + throw new AssertionError(e); + } + }).collect(Collectors.toMap(Pair::getFirst, Pair::getSecond, (v1, v2) -> v1)); + } + + /** + * Apply the key value pairs in the query string of the given URI to the given Configuration. If a + * single key occurred multiple times, only the first one will take effect. + */ + public static void applyURIQueriesToConf(URI uri, Configuration conf) { + parseURIQueries(uri).forEach(conf::set); + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestStrings.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestStrings.java index e2803414e1ad..8528fd88beb1 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestStrings.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestStrings.java @@ -17,51 +17,79 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.net.URI; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Assert; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; @Category({ SmallTests.class }) public class TestStrings { - @Rule - public final ExpectedException thrown = ExpectedException.none(); - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestStrings.class); @Test public void testAppendKeyValue() { - Assert.assertEquals("foo, bar=baz", + assertEquals("foo, bar=baz", Strings.appendKeyValue(new StringBuilder("foo"), "bar", "baz").toString()); - Assert.assertEquals("bar->baz", + assertEquals("bar->baz", Strings.appendKeyValue(new StringBuilder(), "bar", "baz", "->", "| ").toString()); - Assert.assertEquals("foo, bar=baz", + assertEquals("foo, bar=baz", Strings.appendKeyValue(new StringBuilder("foo"), "bar", "baz", "=", ", ").toString()); - Assert.assertEquals("foo| bar->baz", + assertEquals("foo| bar->baz", Strings.appendKeyValue(new StringBuilder("foo"), "bar", "baz", "->", "| ").toString()); } @Test public void testDomainNamePointerToHostName() { - Assert.assertNull(Strings.domainNamePointerToHostName(null)); - Assert.assertEquals("foo", Strings.domainNamePointerToHostName("foo")); - Assert.assertEquals("foo.com", Strings.domainNamePointerToHostName("foo.com")); - Assert.assertEquals("foo.bar.com", Strings.domainNamePointerToHostName("foo.bar.com")); - Assert.assertEquals("foo.bar.com", Strings.domainNamePointerToHostName("foo.bar.com.")); + assertNull(Strings.domainNamePointerToHostName(null)); + assertEquals("foo", 
Strings.domainNamePointerToHostName("foo")); + assertEquals("foo.com", Strings.domainNamePointerToHostName("foo.com")); + assertEquals("foo.bar.com", Strings.domainNamePointerToHostName("foo.bar.com")); + assertEquals("foo.bar.com", Strings.domainNamePointerToHostName("foo.bar.com.")); } @Test public void testPadFront() { - Assert.assertEquals("ddfoo", Strings.padFront("foo", 'd', 5)); + assertEquals("ddfoo", Strings.padFront("foo", 'd', 5)); + assertThrows(IllegalArgumentException.class, () -> Strings.padFront("foo", 'd', 1)); + } + + @Test + public void testParseURIQueries() throws Exception { + Map queries = Strings.parseURIQueries(new URI("hbase+rpc://server01:123?a=1&b=2&a=3&" + + URLEncoder.encode("& ?", StandardCharsets.UTF_8.name()) + "=&" + + URLEncoder.encode("===", StandardCharsets.UTF_8.name()))); + assertEquals("1", queries.get("a")); + assertEquals("2", queries.get("b")); + assertEquals("", queries.get("& ?")); + assertEquals("", queries.get("===")); + assertEquals(4, queries.size()); + + assertTrue(Strings.parseURIQueries(new URI("hbase+zk://zk1:2181/")).isEmpty()); + assertTrue(Strings.parseURIQueries(new URI("hbase+zk://zk1:2181/?")).isEmpty()); + assertTrue(Strings.parseURIQueries(new URI("hbase+zk://zk1:2181/?#anchor")).isEmpty()); + } - thrown.expect(IllegalArgumentException.class); - Strings.padFront("foo", 'd', 1); + @Test + public void testApplyURIQueriesToConf() throws Exception { + Configuration conf = new Configuration(); + Strings.applyURIQueriesToConf(new URI("hbase+zk://aaa:2181/root?a=1&b=2&c"), conf); + assertEquals("1", conf.get("a")); + assertEquals("2", conf.get("b")); + assertEquals("", conf.get("c")); } } From 9f773d4db028cf2a088ea0c80fb55704841a5959 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Mon, 20 May 2024 17:03:19 -0400 Subject: [PATCH 376/514] HBASE-28232 Add release manager for 2.6 in ref guide (#5921) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/community.adoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc index bdfef4196542..d62fb22dcacf 100644 --- a/src/main/asciidoc/_chapters/community.adoc +++ b/src/main/asciidoc/_chapters/community.adoc @@ -173,6 +173,11 @@ If this list goes out of date or you can't reach the listed person, reach out to | Check the https://hbase.apache.org/downloads.html[download] page | *NOT YET* +| 2.6 +| Bryan Beaudreault +| Check the https://hbase.apache.org/downloads.html[download] page +| *NOT YET* + |=== [[hbase.commit.msg.format]] From 8a5337b3e435a947f4ad04ecf4f81b6c3d99e5ba Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 21 May 2024 09:04:13 +0200 Subject: [PATCH 377/514] HBASE-28501 Support non-SPNEGO authentication methods and implement session handling in REST java client library (addendum: revert incompatible API change) (#5928) Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/client/Client.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index a7df571fb2f7..620497d08ba7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -185,7 +185,7 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, /** * Constructor This constructor will create an object using the 
old faulty load balancing logic. * When specifying multiple servers in the cluster object, it is highly recommended to call - * setSticky() on the created client, or use one of the preferred constructors instead. + * setSticky() on the created client, or use the preferred constructor instead. * @param cluster the cluster definition */ public Client(Cluster cluster) { @@ -195,7 +195,7 @@ public Client(Cluster cluster) { /** * Constructor This constructor will create an object using the old faulty load balancing logic. * When specifying multiple servers in the cluster object, it is highly recommended to call - * setSticky() on the created client, or use one of the preferred constructors instead. + * setSticky() on the created client, or use the preferred constructor instead. * @param cluster the cluster definition * @param sslEnabled enable SSL or not */ @@ -207,7 +207,7 @@ public Client(Cluster cluster, boolean sslEnabled) { /** * Constructor This constructor will create an object using the old faulty load balancing logic. * When specifying multiple servers in the cluster object, it is highly recommended to call - * setSticky() on the created client, or use one of the preferred constructors instead. + * setSticky() on the created client, or use the preferred constructor instead. * @param cluster the cluster definition * @param conf Configuration * @param sslEnabled enable SSL or not @@ -221,7 +221,7 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { * Constructor, allowing to define custom trust store (only for SSL connections) This constructor * will create an object using the old faulty load balancing logic. When specifying multiple * servers in the cluster object, it is highly recommended to call setSticky() on the created - * client, or use one of the preferred constructors instead. + * client, or use the preferred constructor instead. * @param cluster the cluster definition * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store @@ -230,8 +230,7 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, Optional trustStoreType) { - this(cluster, HBaseConfiguration.create(), true, trustStorePath, trustStorePassword, - trustStoreType); + this(cluster, HBaseConfiguration.create(), trustStorePath, trustStorePassword, trustStoreType); } /** @@ -255,9 +254,10 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled, } /** - * Constructor, allowing to define custom trust store (only for SSL connections) This constructor - * also enables sticky mode. This is a preferred constructor when not using BASIC or JWT - * authentication. Clients created by this will use the old faulty load balancing logic. + * Constructor, allowing to define custom trust store (only for SSL connections). This constructor + * will create an object using the old faulty load balancing logic. When specifying multiple + * servers in the cluster object, it is highly recommended to call setSticky() on the created + * client, or use the preferred constructor instead. 
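A brief sketch of the usage pattern these javadocs recommend; the host names, port and REST endpoint are illustrative assumptions, while the `Cluster` and `Client` calls come from the same REST client package being edited here.

```java
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class RestStickyExample {
  public static void main(String[] args) throws Exception {
    // Two REST servers behind one logical cluster; host names and port are made up.
    Cluster cluster = new Cluster();
    cluster.add("rest1.example.com", 8080);
    cluster.add("rest2.example.com", 8080);

    // Old-style constructor plus the recommended sticky mode, so that session
    // state stays on the server that established it.
    Client client = new Client(cluster);
    client.setSticky(true);

    Response response = client.get("/version/cluster");
    System.out.println(response.getCode());
  }
}
```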
* @param cluster the cluster definition * @param conf HBase/Hadoop Configuration * @param trustStorePath custom trust store to use for SSL connections @@ -265,10 +265,10 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled, * @param trustStoreType type of custom trust store * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ - public Client(Cluster cluster, Configuration conf, boolean sslEnabled, String trustStorePath, + public Client(Cluster cluster, Configuration conf, String trustStorePath, Optional trustStorePassword, Optional trustStoreType) { KeyStore trustStore = loadTruststore(trustStorePath, trustStorePassword, trustStoreType); - initialize(cluster, conf, sslEnabled, false, Optional.of(trustStore), Optional.empty(), + initialize(cluster, conf, true, false, Optional.of(trustStore), Optional.empty(), Optional.empty(), Optional.empty()); } From d85574aa1f456cdd839ed4bd3e9d8c23a89fac0c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 16:22:38 +0800 Subject: [PATCH 378/514] HBASE-28607 Bump requests from 2.31.0 to 2.32.0 in /dev-support/flaky-tests (#5929) updated-dependencies: - dependency-name: requests dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/flaky-tests/python-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/python-requirements.txt b/dev-support/flaky-tests/python-requirements.txt index 5269993fb9ba..73b7c8e11600 100644 --- a/dev-support/flaky-tests/python-requirements.txt +++ b/dev-support/flaky-tests/python-requirements.txt @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -requests==2.31.0 +requests==2.32.0 future==0.18.3 gitpython==3.1.41 rbtools==4.0 From 3b18ba664a6dcde344e13fe9305c272592195c03 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Wed, 22 May 2024 10:05:54 +0200 Subject: [PATCH 379/514] HBASE-28605 Add ErrorProne ban on Hadoop shaded thirdparty jars (#5918) This change results in this error on master at `3a3dd66e21`. 
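Before the quoted enforcer output: the rule's reason string already points at the remedy, which is swapping the Hadoop-relocated Guava import for the hbase-thirdparty relocation. A minimal sketch, assuming the offending class only needs Guava's `Sets`:

```java
// Banned by the new rule: Hadoop's relocated Guava.
// import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;

// Allowed: the Guava relocation shipped in hbase-thirdparty.
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

import java.util.Set;

public class ShadedGuavaImportExample {
  public static void main(String[] args) {
    Set<String> families = Sets.newHashSet("f1", "f2");
    System.out.println(families);
  }
}
```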
``` [WARNING] Rule 2: de.skuzzle.enforcer.restrictimports.rule.RestrictImports failed with message: Banned imports detected: Reason: Use shaded version in hbase-thirdparty in file: org/apache/hadoop/hbase/backup/TestBackupDelete.java org.apache.hadoop.thirdparty.com.google.common.collect.Sets (Line: 34, Matched by: org.apache.hadoop.thirdparty.**) ``` Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/pom.xml b/pom.xml index b750cac0557c..7d23580698f8 100644 --- a/pom.xml +++ b/pom.xml @@ -2515,6 +2515,7 @@ org.apache.commons.cli.** org.apache.commons.collections.** org.apache.commons.collections4.** + org.apache.hadoop.thirdparty.** From 419666b8eb8a881724fe6f65e8235a4220824e51 Mon Sep 17 00:00:00 2001 From: lixiaobao <977734161@qq.com> Date: Wed, 22 May 2024 18:34:42 +0800 Subject: [PATCH 380/514] HBASE-28577 Remove deprecated methods in KeyValue (#5883) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: lixiaobao Co-authored-by: 李小保 Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/KeyValue.java | 656 ------------------ .../hbase/io/hfile/FixedFileTrailer.java | 11 +- .../hadoop/hbase/TestSerialization.java | 7 +- .../hbase/io/hfile/TestFixedFileTrailer.java | 9 - 4 files changed, 7 insertions(+), 676 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 3661c063e88c..89e91ca80361 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; -import org.apache.hadoop.io.RawComparator; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -87,22 +86,6 @@ public class KeyValue implements ExtendedCell, Cloneable { public static final byte[] COLUMN_FAMILY_DELIM_ARRAY = new byte[] { COLUMN_FAMILY_DELIMITER }; - /** - * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion of - * KeyValue only. - * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove - * for hbase 3.0. - */ - @Deprecated - public static final KVComparator COMPARATOR = new KVComparator(); - /** - * A {@link KVComparator} for hbase:meta catalog table {@link KeyValue}s. - * @deprecated Use {@link MetaCellComparator#META_COMPARATOR} instead. Deprecated for hbase 2.0, - * remove for hbase 3.0. - */ - @Deprecated - public static final KVComparator META_COMPARATOR = new MetaComparator(); - /** Size of the key length field in bytes */ public static final int KEY_LENGTH_SIZE = Bytes.SIZEOF_INT; @@ -1497,623 +1480,6 @@ public static int getDelimiterInReverse(final byte[] b, final int offset, final return result; } - /** - * A {@link KVComparator} for hbase:meta catalog table {@link KeyValue}s. - * @deprecated : {@link MetaCellComparator#META_COMPARATOR} to be used. Deprecated for hbase 2.0, - * remove for hbase 3.0. - */ - @Deprecated - public static class MetaComparator extends KVComparator { - /** - * Compare key portion of a {@link KeyValue} for keys in hbase:meta table. 
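The deprecation javadocs being removed here already name the replacements; a minimal migration sketch, where the `Cell` arguments and the list are placeholders supplied by the caller:

```java
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.MetaCellComparator;

public class ComparatorMigrationSketch {
  // Formerly KeyValue.COMPARATOR: compares cells of plain (non-catalog) tables.
  static int compareUserTableCells(Cell left, Cell right) {
    return CellComparator.getInstance().compare(left, right);
  }

  // Formerly KeyValue.META_COMPARATOR: compares cells of hbase:meta.
  static int compareMetaCells(Cell left, Cell right) {
    return MetaCellComparator.META_COMPARATOR.compare(left, right);
  }

  // CellComparator extends java.util.Comparator<Cell>, so it can be used
  // directly wherever a Comparator is expected.
  static void sortUserTableCells(List<Cell> cells) {
    cells.sort(CellComparator.getInstance());
  }
}
```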
- */ - @Override - public int compare(final Cell left, final Cell right) { - return PrivateCellUtil.compareKeyIgnoresMvcc(MetaCellComparator.META_COMPARATOR, left, right); - } - - @Override - public int compareOnlyKeyPortion(Cell left, Cell right) { - return compare(left, right); - } - - @Override - public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, - int rlength) { - int leftDelimiter = getDelimiter(left, loffset, llength, HConstants.DELIMITER); - int rightDelimiter = getDelimiter(right, roffset, rlength, HConstants.DELIMITER); - // Compare up to the delimiter - int lpart = (leftDelimiter < 0 ? llength : leftDelimiter - loffset); - int rpart = (rightDelimiter < 0 ? rlength : rightDelimiter - roffset); - int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare middle bit of the row. - // Move past delimiter - leftDelimiter++; - rightDelimiter++; - int leftFarDelimiter = getDelimiterInReverse(left, leftDelimiter, - llength - (leftDelimiter - loffset), HConstants.DELIMITER); - int rightFarDelimiter = getDelimiterInReverse(right, rightDelimiter, - rlength - (rightDelimiter - roffset), HConstants.DELIMITER); - // Now compare middlesection of row. - lpart = (leftFarDelimiter < 0 ? llength + loffset : leftFarDelimiter) - leftDelimiter; - rpart = (rightFarDelimiter < 0 ? rlength + roffset : rightFarDelimiter) - rightDelimiter; - result = super.compareRows(left, leftDelimiter, lpart, right, rightDelimiter, rpart); - if (result != 0) { - return result; - } else { - if (leftDelimiter < 0 && rightDelimiter >= 0) { - return -1; - } else if (rightDelimiter < 0 && leftDelimiter >= 0) { - return 1; - } else if (leftDelimiter < 0 && rightDelimiter < 0) { - return 0; - } - } - // Compare last part of row, the rowid. - leftFarDelimiter++; - rightFarDelimiter++; - result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset), - right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset)); - return result; - } - - /** - * Don't do any fancy Block Index splitting tricks. - */ - @Override - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - return Arrays.copyOf(rightKey, rightKey.length); - } - - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. TODO: With V3 consider removing this. - * @return legacy class name for FileFileTrailer#comparatorClassName - */ - @Override - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$MetaKeyComparator"; - } - - @Override - protected MetaComparator clone() throws CloneNotSupportedException { - return (MetaComparator) super.clone(); - } - - /** - * Override the row key comparison to parse and compare the meta row key parts. - */ - @Override - protected int compareRowKey(final Cell l, final Cell r) { - byte[] left = l.getRowArray(); - int loffset = l.getRowOffset(); - int llength = l.getRowLength(); - byte[] right = r.getRowArray(); - int roffset = r.getRowOffset(); - int rlength = r.getRowLength(); - return compareRows(left, loffset, llength, right, roffset, rlength); - } - } - - /** - * Compare KeyValues. When we compare KeyValues, we only compare the Key portion. 
This means two - * KeyValues with same Key but different Values are considered the same as far as this Comparator - * is concerned. - * @deprecated : Use {@link CellComparatorImpl}. Deprecated for hbase 2.0, remove for hbase 3.0. - */ - @Deprecated - public static class KVComparator implements RawComparator, SamePrefixComparator { - - /** - * The HFileV2 file format's trailer contains this class name. We reinterpret this and - * instantiate the appropriate comparator. TODO: With V3 consider removing this. - * @return legacy class name for FileFileTrailer#comparatorClassName - */ - public String getLegacyKeyComparatorName() { - return "org.apache.hadoop.hbase.KeyValue$KeyComparator"; - } - - @Override // RawComparator - public int compare(byte[] l, int loff, int llen, byte[] r, int roff, int rlen) { - return compareFlatKey(l, loff, llen, r, roff, rlen); - } - - /** - * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. - * @param left left cell to compare row key - * @param right right cell to compare row key - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - protected int compareRowKey(final Cell left, final Cell right) { - return CellComparatorImpl.COMPARATOR.compareRows(left, right); - } - - /** - * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are full - * KVs laid out in a flat byte[]s. - * @param left the left kv serialized byte[] to be compared with - * @param loffset the offset in the left byte[] - * @param llength the length in the left byte[] - * @param right the right kv serialized byte[] to be compared with - * @param roffset the offset in the right byte[] - * @param rlength the length in the right byte[] - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset, - int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength = Bytes.toShort(right, roffset); - int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, lrowlength, right, - roffset + Bytes.SIZEOF_SHORT, rrowlength); - if (compare != 0) { - return compare; - } - - // Compare the rest of the two KVs without making any assumptions about - // the common prefix. This function will not compare rows anyway, so we - // don't need to tell it that the common prefix includes the row. - return compareWithoutRow(0, left, loffset, llength, right, roffset, rlength, rrowlength); - } - - public int compareFlatKey(byte[] left, byte[] right) { - return compareFlatKey(left, 0, left.length, right, 0, right.length); - } - - // compare a key against row/fam/qual/ts/type - public int compareKey(Cell cell, byte[] row, int roff, int rlen, byte[] fam, int foff, int flen, - byte[] col, int coff, int clen, long ts, byte type) { - - int compare = - compareRows(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, roff, rlen); - if (compare != 0) { - return compare; - } - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. - if ( - cell.getFamilyLength() + cell.getQualifierLength() == 0 - && cell.getTypeByte() == Type.Minimum.getCode() - ) { - // left is "bigger", i.e. 
it appears later in the sorted order - return 1; - } - if (flen + clen == 0 && type == Type.Minimum.getCode()) { - return -1; - } - - compare = compareFamilies(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength(), fam, foff, flen); - if (compare != 0) { - return compare; - } - compare = compareColumns(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), col, coff, clen); - if (compare != 0) { - return compare; - } - // Next compare timestamps. - compare = compareTimestamps(cell.getTimestamp(), ts); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - return (0xff & type) - (0xff & cell.getTypeByte()); - } - - public int compareOnlyKeyPortion(Cell left, Cell right) { - return PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, right); - } - - /** - * Compares the Key of a cell -- with fields being more significant in this order: rowkey, - * colfam/qual, timestamp, type, mvcc - */ - @Override - public int compare(final Cell left, final Cell right) { - int compare = CellComparatorImpl.COMPARATOR.compare(left, right); - return compare; - } - - public int compareTimestamps(final Cell left, final Cell right) { - return CellComparatorImpl.COMPARATOR.compareTimestamps(left, right); - } - - /** - * Compares the rows of a cell - * @param left left cell to compare rows for - * @param right right cell to compare rows for - * @return Result comparing rows. - */ - public int compareRows(final Cell left, final Cell right) { - return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); - } - - /** - * Get the b[],o,l for left and right rowkey portions and compare. - * @param left the left kv serialized byte[] to be compared with - * @param loffset the offset in the left byte[] - * @param llength the length in the left byte[] - * @param right the right kv serialized byte[] to be compared with - * @param roffset the offset in the right byte[] - * @param rlength the length in the right byte[] - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, - int rlength) { - return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); - } - - int compareColumns(final Cell left, final short lrowlength, final Cell right, - final short rrowlength) { - return CellComparatorImpl.COMPARATOR.compareColumns(left, right); - } - - protected int compareColumns(byte[] left, int loffset, int llength, final int lfamilylength, - byte[] right, int roffset, int rlength, final int rfamilylength) { - // Compare family portion first. - int diff = Bytes.compareTo(left, loffset, lfamilylength, right, roffset, rfamilylength); - if (diff != 0) { - return diff; - } - // Compare qualifier portion - return Bytes.compareTo(left, loffset + lfamilylength, llength - lfamilylength, right, - roffset + rfamilylength, rlength - rfamilylength); - } - - static int compareTimestamps(final long ltimestamp, final long rtimestamp) { - // The below older timestamps sorting ahead of newer timestamps looks - // wrong but it is intentional. 
This way, newer timestamps are first - // found when we iterate over a memstore and newer versions are the - // first we trip over when reading from a store file. - if (ltimestamp < rtimestamp) { - return 1; - } else if (ltimestamp > rtimestamp) { - return -1; - } - return 0; - } - - /** - * Overridden - * @param commonPrefix location of expected common prefix - * @param left the left kv serialized byte[] to be compared with - * @param loffset the offset in the left byte[] - * @param llength the length in the left byte[] - * @param right the right kv serialized byte[] to be compared with - * @param roffset the offset in the byte[] - * @param rlength the length in the right byte[] - * @return 0 if equal, <0 if left smaller, >0 if right smaller - */ - @Override // SamePrefixComparator - public int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength) { - // Compare row - short lrowlength = Bytes.toShort(left, loffset); - short rrowlength; - - int comparisonResult = 0; - if (commonPrefix < ROW_LENGTH_SIZE) { - // almost nothing in common - rrowlength = Bytes.toShort(right, roffset); - comparisonResult = compareRows(left, loffset + ROW_LENGTH_SIZE, lrowlength, right, - roffset + ROW_LENGTH_SIZE, rrowlength); - } else { // the row length is the same - rrowlength = lrowlength; - if (commonPrefix < ROW_LENGTH_SIZE + rrowlength) { - // The rows are not the same. Exclude the common prefix and compare - // the rest of the two rows. - int common = commonPrefix - ROW_LENGTH_SIZE; - comparisonResult = compareRows(left, loffset + common + ROW_LENGTH_SIZE, - lrowlength - common, right, roffset + common + ROW_LENGTH_SIZE, rrowlength - common); - } - } - if (comparisonResult != 0) { - return comparisonResult; - } - - assert lrowlength == rrowlength; - return compareWithoutRow(commonPrefix, left, loffset, llength, right, roffset, rlength, - lrowlength); - } - - /** - * Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This - * method is used both in the normal comparator and the "same-prefix" comparator. Note that we - * are assuming that row portions of both KVs have already been parsed and found identical, and - * we don't validate that assumption here. the length of the common prefix of the two key-values - * being compared, including row length and row - */ - private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength, - byte[] right, int roffset, int rlength, short rowlength) { - /*** - * KeyValue Format and commonLength: - * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|.... - * ------------------|-------commonLength--------|-------------- - */ - int commonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rowlength; - - // commonLength + TIMESTAMP_TYPE_SIZE - int commonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + commonLength; - // ColumnFamily + Qualifier length. - int lcolumnlength = llength - commonLengthWithTSAndType; - int rcolumnlength = rlength - commonLengthWithTSAndType; - - byte ltype = left[loffset + (llength - 1)]; - byte rtype = right[roffset + (rlength - 1)]; - - // If the column is not specified, the "minimum" key type appears the - // latest in the sorted order, regardless of the timestamp. This is used - // for specifying the last key/value in a given row, because there is no - // "lexicographically last column" (it would be infinitely long). The - // "maximum" key type does not need this behavior. 
- if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) { - return -1; - } - - int lfamilyoffset = commonLength + loffset; - int rfamilyoffset = commonLength + roffset; - - // Column family length. - int lfamilylength = left[lfamilyoffset - 1]; - int rfamilylength = right[rfamilyoffset - 1]; - // If left family size is not equal to right family size, we need not - // compare the qualifiers. - boolean sameFamilySize = (lfamilylength == rfamilylength); - int common = 0; - if (commonPrefix > 0) { - common = Math.max(0, commonPrefix - commonLength); - if (!sameFamilySize) { - // Common should not be larger than Math.min(lfamilylength, - // rfamilylength). - common = Math.min(common, Math.min(lfamilylength, rfamilylength)); - } else { - common = Math.min(common, Math.min(lcolumnlength, rcolumnlength)); - } - } - if (!sameFamilySize) { - // comparing column family is enough. - return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - common, right, - rfamilyoffset + common, rfamilylength - common); - } - // Compare family & qualifier together. - final int comparison = Bytes.compareTo(left, lfamilyoffset + common, lcolumnlength - common, - right, rfamilyoffset + common, rcolumnlength - common); - if (comparison != 0) { - return comparison; - } - - //// - // Next compare timestamps. - long ltimestamp = Bytes.toLong(left, loffset + (llength - TIMESTAMP_TYPE_SIZE)); - long rtimestamp = Bytes.toLong(right, roffset + (rlength - TIMESTAMP_TYPE_SIZE)); - int compare = compareTimestamps(ltimestamp, rtimestamp); - if (compare != 0) { - return compare; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - return (0xff & rtype) - (0xff & ltype); - } - - protected int compareFamilies(final byte[] left, final int loffset, final int lfamilylength, - final byte[] right, final int roffset, final int rfamilylength) { - int diff = Bytes.compareTo(left, loffset, lfamilylength, right, roffset, rfamilylength); - return diff; - } - - protected int compareColumns(final byte[] left, final int loffset, final int lquallength, - final byte[] right, final int roffset, final int rquallength) { - int diff = Bytes.compareTo(left, loffset, lquallength, right, roffset, rquallength); - return diff; - } - - /** - * Compares the row and column of two keyvalues for equality - * @param left left cell to compare row and column - * @param right right cell to compare row and column - * @return True if same row and column. - */ - public boolean matchingRowColumn(final Cell left, final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - - // TsOffset = end of column data. 
just comparing Row+CF length of each - if ( - (left.getRowLength() + left.getFamilyLength() + left.getQualifierLength()) - != (right.getRowLength() + right.getFamilyLength() + right.getQualifierLength()) - ) { - return false; - } - - if (!matchingRows(left, lrowlength, right, rrowlength)) { - return false; - } - - int lfoffset = left.getFamilyOffset(); - int rfoffset = right.getFamilyOffset(); - int lclength = left.getQualifierLength(); - int rclength = right.getQualifierLength(); - int lfamilylength = left.getFamilyLength(); - int rfamilylength = right.getFamilyLength(); - int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength, - right.getFamilyArray(), rfoffset, rfamilylength); - if (diff != 0) { - return false; - } else { - diff = compareColumns(left.getQualifierArray(), left.getQualifierOffset(), lclength, - right.getQualifierArray(), right.getQualifierOffset(), rclength); - return diff == 0; - } - } - - /** - * Compares the row of two keyvalues for equality - * @param left left cell to compare row - * @param right right cell to compare row - * @return True if rows match. - */ - public boolean matchingRows(final Cell left, final Cell right) { - short lrowlength = left.getRowLength(); - short rrowlength = right.getRowLength(); - return matchingRows(left, lrowlength, right, rrowlength); - } - - /** - * Compares the row of two keyvalues for equality - * @param left left cell to compare row - * @param lrowlength left row length - * @param right right cell to compare row - * @param rrowlength right row length - * @return True if rows match. - */ - private boolean matchingRows(final Cell left, final short lrowlength, final Cell right, - final short rrowlength) { - return lrowlength == rrowlength && matchingRows(left.getRowArray(), left.getRowOffset(), - lrowlength, right.getRowArray(), right.getRowOffset(), rrowlength); - } - - /** - * Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated. - * @param left Left row array. - * @param loffset Left row offset. - * @param llength Left row length. - * @param right Right row array. - * @param roffset Right row offset. - * @param rlength Right row length. - * @return Whether rows are the same row. - */ - public boolean matchingRows(final byte[] left, final int loffset, final int llength, - final byte[] right, final int roffset, final int rlength) { - return Bytes.equals(left, loffset, llength, right, roffset, rlength); - } - - public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) { - byte[] fakeKey = getShortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock); - if (compareFlatKey(fakeKey, firstKeyInBlock) > 0) { - LOG.error("Unexpected getShortMidpointKey result, fakeKey:" + Bytes.toStringBinary(fakeKey) - + ", firstKeyInBlock:" + Bytes.toStringBinary(firstKeyInBlock)); - return firstKeyInBlock; - } - if (lastKeyOfPreviousBlock != null && compareFlatKey(lastKeyOfPreviousBlock, fakeKey) >= 0) { - LOG.error("Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:" - + Bytes.toStringBinary(lastKeyOfPreviousBlock) + ", fakeKey:" - + Bytes.toStringBinary(fakeKey)); - return firstKeyInBlock; - } - return fakeKey; - } - - /** - * This is a HFile block index key optimization. 
- * @param leftKey byte array for left Key - * @param rightKey byte array for right Key - * @return 0 if equal, <0 if left smaller, >0 if right smaller - * @deprecated Since 0.99.2; - */ - @Deprecated - public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { - if (rightKey == null) { - throw new IllegalArgumentException("rightKey can not be null"); - } - if (leftKey == null) { - return Arrays.copyOf(rightKey, rightKey.length); - } - if (compareFlatKey(leftKey, rightKey) >= 0) { - throw new IllegalArgumentException("Unexpected input, leftKey:" + Bytes.toString(leftKey) - + ", rightKey:" + Bytes.toString(rightKey)); - } - - short leftRowLength = Bytes.toShort(leftKey, 0); - short rightRowLength = Bytes.toShort(rightKey, 0); - int leftCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + leftRowLength; - int rightCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rightRowLength; - int leftCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + leftCommonLength; - int rightCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + rightCommonLength; - int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType; - int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType; - // rows are equal - if ( - leftRowLength == rightRowLength && compareRows(leftKey, ROW_LENGTH_SIZE, leftRowLength, - rightKey, ROW_LENGTH_SIZE, rightRowLength) == 0 - ) { - // Compare family & qualifier together. - int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey, - rightCommonLength, rightColumnLength); - // same with "row + family + qualifier", return rightKey directly - if (comparison == 0) { - return Arrays.copyOf(rightKey, rightKey.length); - } - // "family + qualifier" are different, generate a faked key per rightKey - byte[] newKey = Arrays.copyOf(rightKey, rightKey.length); - Bytes.putLong(newKey, rightKey.length - TIMESTAMP_TYPE_SIZE, HConstants.LATEST_TIMESTAMP); - Bytes.putByte(newKey, rightKey.length - TYPE_SIZE, Type.Maximum.getCode()); - return newKey; - } - // rows are different - short minLength = leftRowLength < rightRowLength ? leftRowLength : rightRowLength; - short diffIdx = 0; - while ( - diffIdx < minLength - && leftKey[ROW_LENGTH_SIZE + diffIdx] == rightKey[ROW_LENGTH_SIZE + diffIdx] - ) { - diffIdx++; - } - byte[] newRowKey = null; - if (diffIdx >= minLength) { - // leftKey's row is prefix of rightKey's. - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } else { - int diffByte = leftKey[ROW_LENGTH_SIZE + diffIdx]; - if ( - (0xff & diffByte) < 0xff && (diffByte + 1) < (rightKey[ROW_LENGTH_SIZE + diffIdx] & 0xff) - ) { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(leftKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx); - newRowKey[diffIdx] = (byte) (diffByte + 1); - } else { - newRowKey = new byte[diffIdx + 1]; - System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1); - } - } - return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, Type.Maximum) - .getKey(); - } - - @Override - protected KVComparator clone() throws CloneNotSupportedException { - return (KVComparator) super.clone(); - } - - } - /** * Create a KeyValue reading from in * @param in Where to read bytes from. 
Creates a byte array to hold the KeyValue backing bytes @@ -2166,28 +1532,6 @@ public static long write(final KeyValue kv, final DataOutput out) throws IOExcep return (long) length + Bytes.SIZEOF_INT; } - /** - * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do not - * require a {@link DataOutput}, just take plain {@link OutputStream} Named oswrite - * so does not clash with {@link #write(KeyValue, DataOutput)} - * @param kv the KeyValue on which write is being requested - * @param out OutputStream to write keyValue to - * @param withTags boolean value indicating write is with Tags or not - * @return Length written on stream - * @throws IOException if any IO error happen - * @see #create(DataInput) for the inverse function - * @see #write(KeyValue, DataOutput) - * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean) - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use - * {@link #write(OutputStream, boolean)} - */ - @Deprecated - public static long oswrite(final KeyValue kv, final OutputStream out, final boolean withTags) - throws IOException { - ByteBufferUtils.putInt(out, kv.getSerializedSize(withTags)); - return (long) kv.write(out, withTags) + Bytes.SIZEOF_INT; - } - @Override public int write(OutputStream out, boolean withTags) throws IOException { int len = getSerializedSize(withTags); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 861a68970577..72340eeead4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.InnerStoreCellComparator; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaCellComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.util.Bytes; @@ -555,16 +554,10 @@ private static Class getComparatorClass(String compara throws IOException { Class comparatorKlass; // for BC - if ( - comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")) - ) { + if (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")) { comparatorKlass = InnerStoreCellComparator.class; } else if ( - comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator") || (comparatorClassName .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 98a3c01a8e43..dc4ecd1ff8e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -46,6 
+46,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.DataInputBuffer; @@ -102,8 +103,10 @@ public void testCreateKeyValueInvalidNegativeLength() { long l = 0; try { - l = KeyValue.oswrite(kv_0, dos, false); - l += KeyValue.oswrite(kv_1, dos, false); + ByteBufferUtils.putInt(dos, kv_0.getSerializedSize(false)); + l = (long) kv_0.write(dos, false) + Bytes.SIZEOF_INT; + ByteBufferUtils.putInt(dos, kv_1.getSerializedSize(false)); + l += (long) kv_1.write(dos, false) + Bytes.SIZEOF_INT; assertEquals(100L, l); } catch (IOException e) { fail("Unexpected IOException" + e.getMessage()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 3bad8d46a149..5c4767b2a677 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.InnerStoreCellComparator; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaCellComparator; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -101,16 +100,8 @@ public void setUp() throws IOException { @Test public void testCreateComparator() throws IOException { - assertEquals(InnerStoreCellComparator.class, - createComparator(KeyValue.COMPARATOR.getLegacyKeyComparatorName()).getClass()); - assertEquals(InnerStoreCellComparator.class, - createComparator(KeyValue.COMPARATOR.getClass().getName()).getClass()); assertEquals(InnerStoreCellComparator.class, createComparator(CellComparator.class.getName()).getClass()); - assertEquals(MetaCellComparator.class, - createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); - assertEquals(MetaCellComparator.class, - createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); assertEquals(MetaCellComparator.class, createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); assertEquals(MetaCellComparator.class, From e5b581686dd772a45885f34aaa90d7c4ce3474af Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 22 May 2024 16:28:33 +0100 Subject: [PATCH 381/514] HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286) Signed-off-by: Tak Lon (Stephen) Wu --- dev-support/hbase_docker/README.md | 3 + dev-support/hbase_docker/m1/Dockerfile | 92 ++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 dev-support/hbase_docker/m1/Dockerfile diff --git a/dev-support/hbase_docker/README.md b/dev-support/hbase_docker/README.md index d98f7ada98b2..3d0641afaee9 100644 --- a/dev-support/hbase_docker/README.md +++ b/dev-support/hbase_docker/README.md @@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell when run. **hbase_docker** image. Alternatively, you can type `docker run -it hbase_docker bash` to start a container without a running HMaster. 
Within this environment, HBase is built in `/root/hbase-bin`. + +> NOTE: When running on mac m1 platforms, the docker file requires setting platfrom flag explicitly. +> You may use same instructions above running from to the "./m1" sub-dir. diff --git a/dev-support/hbase_docker/m1/Dockerfile b/dev-support/hbase_docker/m1/Dockerfile new file mode 100644 index 000000000000..5399fa0e5af6 --- /dev/null +++ b/dev-support/hbase_docker/m1/Dockerfile @@ -0,0 +1,92 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM --platform=linux/amd64 ubuntu:22.04 AS base_image +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \ + DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends -y \ + ca-certificates=20211016 \ + curl='7.81.0-*' \ + git='1:2.34.1-*' \ + locales='2.35-*' \ + && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* \ + && \ + locale-gen en_US.UTF-8 +ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 + +FROM base_image AS maven_download_image +ENV MAVEN_VERSION='3.8.6' +ENV MAVEN_URL "https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz" +ENV MAVEN_SHA512 'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26' +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz "${MAVEN_URL}" && \ + echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c - + +FROM base_image AS openjdk8_download_image +ENV OPENJDK8_URL 'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz' +ENV OPENJDK8_SHA256 '1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee' +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \ + echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c - + +FROM base_image +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# +# when updating java or maven versions here, consider also updating +# `dev-support/docker/Dockerfile` as well. 
+# + +# hadolint ignore=DL3010 +COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz +RUN tar xzf /tmp/maven.tar.gz -C /opt && \ + ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven && \ + rm /tmp/maven.tar.gz + +# hadolint ignore=DL3010 +COPY --from=openjdk8_download_image /tmp/adoptopenjdk8.tar.gz /tmp/adoptopenjdk8.tar.gz +RUN mkdir -p /usr/lib/jvm && \ + tar xzf /tmp/adoptopenjdk8.tar.gz -C /usr/lib/jvm && \ + ln -s "/usr/lib/jvm/$(basename "$(tar -tf /tmp/adoptopenjdk8.tar.gz | head -n1)")" /usr/lib/jvm/java-8-adoptopenjdk && \ + ln -s /usr/lib/jvm/java-8-adoptopenjdk /usr/lib/jvm/java-8 && \ + rm /tmp/adoptopenjdk8.tar.gz + +ENV MAVEN_HOME '/opt/maven' +ENV JAVA_HOME '/usr/lib/jvm/java-8' +ENV PATH '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' +ENV PATH "${JAVA_HOME}/bin:${MAVEN_HOME}/bin:${PATH}" + +# Pull down HBase and build it into /root/hbase-bin. +WORKDIR /root +ARG BRANCH_OR_TAG=master +RUN git clone --depth 1 -b ${BRANCH_OR_TAG} https://github.com/apache/hbase.git \ + && \ + mvn -T1C clean install -DskipTests assembly:single -f ./hbase/pom.xml \ + && \ + mkdir -p hbase-bin \ + && \ + find /root/hbase/hbase-assembly/target -iname '*.tar.gz' -not -iname '*client*' \ + | head -n 1 \ + | xargs -I{} tar xzf {} --strip-components 1 -C /root/hbase-bin + +# Set HBASE_HOME, add it to the path, and start HBase. +ENV HBASE_HOME /root/hbase-bin +ENV PATH "/root/hbase-bin/bin:${PATH}" + +CMD ["/bin/bash", "-c", "start-hbase.sh; hbase shell"] From 65ff7a228e7b5ec0e1b0450dc56d530f1c795d52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 May 2024 22:43:43 +0800 Subject: [PATCH 382/514] HBASE-28615 Bump requests from 2.31.0 to 2.32.2 in /dev-support/git-jira-release-audit (#5931) updated-dependencies: - dependency-name: requests dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 606608797965..01328a1b49b4 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -30,7 +30,7 @@ oauthlib==3.1.0 pbr==5.4.4 pycparser==2.19 PyJWT==2.4.0 -requests==2.31.0 +requests==2.32.2 requests-oauthlib==1.3.0 requests-toolbelt==0.9.1 six==1.14.0 From c5e6d82bf4300b25095d9e80030d7c669a166768 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 24 May 2024 22:54:56 +0800 Subject: [PATCH 383/514] HBASE-28425 Allow specify cluster key without zookeeper in replication (#5865) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Nick Dimiduk Reviewed-by: Andor Molnár --- .../client/ConnectionRegistryFactory.java | 46 +++++++++- .../client/ConnectionRegistryURIFactory.java | 6 ++ .../hbase/client/RawAsyncHBaseAdmin.java | 11 ++- .../RpcConnectionRegistryURIFactory.java | 9 ++ .../ZKConnectionRegistryURIFactory.java | 12 +++ .../ReplicationPeerConfigUtil.java | 15 +-- .../hadoop/hbase/HBaseConfiguration.java | 5 +- .../replication/VerifyReplication.java | 4 +- .../replication/TestVerifyReplication.java | 8 ++ .../TestVerifyReplicationAdjunct.java | 8 ++ .../hbase/replication/ReplicationPeers.java | 17 +++- .../hbase/replication/ReplicationUtils.java | 21 ----- .../TestReplicationStateBasic.java | 5 +- .../client/ClusterConnectionFactory.java | 15 +++ .../replication/ReplicationPeerManager.java | 92 ++++++++++++------- .../replication/HBaseReplicationEndpoint.java | 16 +++- .../apache/hadoop/hbase/HBaseTestingUtil.java | 18 ++++ .../hadoop/hbase/client/TestAdmin4.java | 2 +- .../client/TestAsyncReplicationAdminApi.java | 4 +- ...tAsyncReplicationAdminApiWithClusters.java | 2 +- .../hbase/client/TestReplicaWithCluster.java | 2 +- ...estReplicationAdminForSyncReplication.java | 2 +- .../cleaner/TestReplicationHFileCleaner.java | 4 +- .../TestDisablePeerModification.java | 2 +- .../regionserver/TestBulkLoadReplication.java | 8 +- .../TestBulkLoadReplicationHFileRefs.java | 6 +- .../replication/SyncReplicationTestBase.java | 4 +- .../TestHBaseReplicationEndpoint.java | 26 +++--- .../replication/TestMasterReplication.java | 4 +- ...estMigrateRepliationPeerStorageOnline.java | 2 +- .../TestMultiSlaveReplication.java | 4 +- ...amespaceReplicationWithBulkLoadedData.java | 4 +- .../TestPerTableCFReplication.java | 4 +- .../replication/TestReplicationBase.java | 7 +- .../TestReplicationDisableInactivePeer.java | 8 ++ ...cationEditsDroppedWithDeletedTableCFs.java | 2 +- ...plicationEditsDroppedWithDroppedTable.java | 2 +- .../TestReplicationSmallTests.java | 2 +- .../TestReplicationStatusAfterLagging.java | 8 ++ ...stReplicationStuckWithDeletedTableCFs.java | 2 +- .../TestReplicationStuckWithDroppedTable.java | 2 +- .../TestReplicationSyncUpToolBase.java | 6 +- .../replication/TestReplicationWithTags.java | 2 +- ...tReplicationWithWALExtendedAttributes.java | 2 +- .../TestSyncReplicationStandbyKillRS.java | 3 +- .../TestGlobalReplicationThrottler.java | 61 +++++------- ...ClusterReplicationEndpointFilterEdits.java | 1 + .../regionserver/TestReplicationMarker.java | 2 +- .../regionserver/TestReplicationSource.java | 4 
+- .../regionserver/TestReplicator.java | 4 +- ...bilityLabelReplicationWithExpAsString.java | 2 +- .../TestVisibilityLabelsReplication.java | 2 +- ...TestHBaseFsckCleanReplicationBarriers.java | 2 +- .../hadoop/hbase/HBaseTestingUtility.java | 18 ++++ 54 files changed, 352 insertions(+), 178 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index af4cf75af7fa..cc8d2c6f50bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.net.URI; +import java.net.URISyntaxException; +import java.util.Locale; import java.util.ServiceLoader; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -35,7 +37,7 @@ * The entry point for creating a {@link ConnectionRegistry}. */ @InterfaceAudience.Private -final class ConnectionRegistryFactory { +public final class ConnectionRegistryFactory { private static final Logger LOG = LoggerFactory.getLogger(ConnectionRegistryFactory.class); @@ -90,4 +92,46 @@ static ConnectionRegistry create(Configuration conf, User user) { RpcConnectionRegistry.class, ConnectionRegistry.class); return ReflectionUtils.newInstance(clazz, conf, user); } + + /** + * Check whether the given {@code uri} is valid. + *
    + * Notice that there is no fallback logic for this method, so passing an URI with null scheme can + * not pass. + * @throws IOException if this is not a valid connection registry URI + */ + public static void validate(URI uri) throws IOException { + if (StringUtils.isBlank(uri.getScheme())) { + throw new IOException("No schema for uri: " + uri); + } + ConnectionRegistryURIFactory factory = FACTORIES.get(uri.getScheme().toLowerCase(Locale.ROOT)); + if (factory == null) { + throw new IOException( + "No factory registered for scheme " + uri.getScheme() + ", uri: " + uri); + } + factory.validate(uri); + } + + /** + * If the given {@code clusterKey} can be parsed to a {@link URI}, and the scheme of the + * {@link URI} is supported by us, return the {@link URI}, otherwise return {@code null}. + * @param clusterKey the cluster key, typically from replication peer config + * @return a {@link URI} or {@code null}. + */ + public static URI tryParseAsConnectionURI(String clusterKey) { + // The old cluster key format may not be parsed as URI if we use ip address as the zookeeper + // address, so here we need to catch the URISyntaxException and return false + URI uri; + try { + uri = new URI(clusterKey); + } catch (URISyntaxException e) { + LOG.debug("failed to parse cluster key to URI: {}", clusterKey, e); + return null; + } + if (FACTORIES.containsKey(uri.getScheme())) { + return uri; + } else { + return null; + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java index ab2037a1c138..e5ee4c2321df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java @@ -39,4 +39,10 @@ public interface ConnectionRegistryURIFactory { * {@link ConnectionRegistryFactory}. */ String getScheme(); + + /** + * Validate the given {@code uri}. + * @throws IOException if this is not a valid connection registry URI. 
+ */ + void validate(URI uri) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 103a64e520a1..5822657fd88b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -24,6 +24,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -3785,15 +3786,17 @@ private CompletableFuture checkAndSyncTableToPeerClusters(TableName tableN private CompletableFuture trySyncTableToPeerCluster(TableName tableName, byte[][] splits, ReplicationPeerDescription peer) { - Configuration peerConf = null; + Configuration peerConf; try { - peerConf = - ReplicationPeerConfigUtil.getPeerClusterConfiguration(connection.getConfiguration(), peer); + peerConf = ReplicationPeerConfigUtil + .getPeerClusterConfiguration(connection.getConfiguration(), peer.getPeerConfig()); } catch (IOException e) { return failedFuture(e); } + URI connectionUri = + ConnectionRegistryFactory.tryParseAsConnectionURI(peer.getPeerConfig().getClusterKey()); CompletableFuture future = new CompletableFuture<>(); - addListener(ConnectionFactory.createAsyncConnection(peerConf), (conn, err) -> { + addListener(ConnectionFactory.createAsyncConnection(connectionUri, peerConf), (conn, err) -> { if (err != null) { future.completeExceptionally(err); return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java index 79081ee6c649..064b6ef226a7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryURIFactory.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.URI; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -46,4 +47,12 @@ public ConnectionRegistry create(URI uri, Configuration conf, User user) throws public String getScheme() { return "hbase+rpc"; } + + @Override + public void validate(URI uri) throws IOException { + if (StringUtils.isBlank(uri.getAuthority())) { + throw new IOException("no bootstrap nodes specified, uri: " + uri); + } + // TODO: add more check about the bootstrap nodes + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java index 939adab23b78..86af8d91d2d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryURIFactory.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.URI; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.security.User; @@ -49,4 +50,15 @@ public ConnectionRegistry create(URI uri, Configuration conf, User user) throws public String getScheme() { return 
"hbase+zk"; } + + @Override + public void validate(URI uri) throws IOException { + if (StringUtils.isBlank(uri.getAuthority())) { + throw new IOException("no zookeeper quorum specified, uri: " + uri); + } + // TODO: add more check about the zookeeper quorum + if (StringUtils.isBlank(uri.getPath())) { + throw new IOException("no zookeeper parent path specified, uri: " + uri); + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 2fc5fa3c1152..57be558fb492 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ConnectionRegistryFactory; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; @@ -629,19 +630,19 @@ public static ReplicationPeerConfig removeExcludeTableCFsFromReplicationPeerConf /** * Returns the configuration needed to talk to the remote slave cluster. - * @param conf the base configuration - * @param peer the description of replication peer + * @param conf the base configuration + * @param peerConfig the peer config of replication peer * @return the configuration for the peer cluster, null if it was unable to get the configuration * @throws IOException when create peer cluster configuration failed */ public static Configuration getPeerClusterConfiguration(Configuration conf, - ReplicationPeerDescription peer) throws IOException { - ReplicationPeerConfig peerConfig = peer.getPeerConfig(); + ReplicationPeerConfig peerConfig) throws IOException { Configuration otherConf; - try { + if (ConnectionRegistryFactory.tryParseAsConnectionURI(peerConfig.getClusterKey()) != null) { + otherConf = HBaseConfiguration.create(conf); + } else { + // only need to apply cluster key for old style cluster key otherConf = HBaseConfiguration.createClusterConf(conf, peerConfig.getClusterKey()); - } catch (IOException e) { - throw new IOException("Can't get peer configuration for peerId=" + peer.getPeerId(), e); } if (!peerConfig.getConfiguration().isEmpty()) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 5fc030581dad..c554b5f40526 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.Map; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.zookeeper.ZKConfig; @@ -226,11 +227,11 @@ public static Configuration createClusterConf(Configuration baseConf, String clu public static Configuration createClusterConf(Configuration baseConf, String clusterKey, String overridePrefix) throws IOException { Configuration clusterConf = HBaseConfiguration.create(baseConf); - if (clusterKey != null && !clusterKey.isEmpty()) { + 
if (!StringUtils.isBlank(clusterKey)) { applyClusterKeyToConf(clusterConf, clusterKey); } - if (overridePrefix != null && !overridePrefix.isEmpty()) { + if (!StringUtils.isBlank(overridePrefix)) { Configuration clusterSubset = HBaseConfiguration.subset(clusterConf, overridePrefix); HBaseConfiguration.merge(clusterConf, clusterSubset); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 6e3650297bd3..d83fa1d52522 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableSnapshotScanner; +import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.PrefixFilter; @@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; -import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -397,7 +397,7 @@ public boolean isAborted() { ReplicationStorageFactory.getReplicationPeerStorage(FileSystem.get(conf), localZKW, conf); ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId); return Pair.newPair(peerConfig, - ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); + ReplicationPeerConfigUtil.getPeerClusterConfiguration(conf, peerConfig)); } catch (ReplicationException e) { throw new IOException("An error occurred while trying to connect to the remote peer cluster", e); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index 2958c5ef9114..c7b0ed4c4b05 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -86,6 +86,14 @@ public class TestVerifyReplication extends TestReplicationBase { @Rule public TestName name = new TestName(); + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + // TODO: VerifyReplication does not support connection uri yet, so here we need to use cluster + // key, as in this test we will pass the cluster key config in peer config directly to + // VerifyReplication job. 
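+ // Illustrative values only: the old style cluster key returned here looks like
+ // "zk1,zk2,zk3:2181:/hbase", which is the format the VerifyReplication job still expects.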
+ return util.getClusterKey(); + } + @Before public void setUp() throws Exception { cleanUp(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index d78b2f2e2edd..7044b002a5eb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -81,6 +81,14 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { @Rule public TestName name = new TestName(); + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + // TODO: VerifyReplication does not support connection uri yet, so here we need to use cluster + // key, as in this test we will pass the cluster key config in peer config directly to + // VerifyReplication job. + return util.getClusterKey(); + } + @Before public void setUp() throws Exception { cleanUp(); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 56b86a6f9d13..34da4d237c20 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.replication; +import java.io.IOException; import java.util.Collections; import java.util.Map; import java.util.Set; @@ -24,6 +25,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -155,8 +157,15 @@ private ReplicationPeerImpl createPeer(String peerId) throws ReplicationExceptio SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId); SyncReplicationState newSyncReplicationState = peerStorage.getPeerNewSyncReplicationState(peerId); - return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf), - peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState); + Configuration peerClusterConf; + try { + peerClusterConf = ReplicationPeerConfigUtil.getPeerClusterConfiguration(conf, peerConfig); + } catch (IOException e) { + throw new ReplicationException( + "failed to apply cluster key to configuration for peer config " + peerConfig, e); + } + return new ReplicationPeerImpl(peerClusterConf, peerId, peerConfig, enabled, + syncReplicationState, newSyncReplicationState); } @Override @@ -166,8 +175,8 @@ public void onConfigurationChange(Configuration conf) { for (ReplicationPeerImpl peer : peerCache.values()) { try { peer.onConfigurationChange( - ReplicationUtils.getPeerClusterConfiguration(peer.getPeerConfig(), conf)); - } catch (ReplicationException e) { + ReplicationPeerConfigUtil.getPeerClusterConfiguration(conf, peer.getPeerConfig())); + } catch (IOException e) { LOG.warn("failed to reload configuration for peer {}", peer.getId(), e); } } diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java index ae78781a3133..46a59f896922 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java @@ -25,8 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CompoundConfiguration; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -61,25 +59,6 @@ public final class ReplicationUtils { private ReplicationUtils() { } - public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig, - Configuration baseConf) throws ReplicationException { - Configuration otherConf; - try { - otherConf = HBaseConfiguration.createClusterConf(baseConf, peerConfig.getClusterKey()); - } catch (IOException e) { - throw new ReplicationException("Can't get peer configuration for peer " + peerConfig, e); - } - - if (!peerConfig.getConfiguration().isEmpty()) { - CompoundConfiguration compound = new CompoundConfiguration(); - compound.add(otherConf); - compound.addStringMap(peerConfig.getConfiguration()); - return compound; - } - - return otherConf; - } - private static boolean isCollectionEqual(Collection c1, Collection c2) { if (c1 == null) { return c2 == null; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index dc46e4f1c7c8..bc843eb297c1 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -23,6 +23,7 @@ import static org.junit.Assert.fail; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.junit.Test; @@ -86,8 +87,8 @@ public void testReplicationPeers() throws Exception { SyncReplicationState.NONE); assertNumberOfPeers(2); - assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationUtils - .getPeerClusterConfiguration(rp.getPeerStorage().getPeerConfig(ID_ONE), rp.getConf()))); + assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationPeerConfigUtil + .getPeerClusterConfiguration(rp.getConf(), rp.getPeerStorage().getPeerConfig(ID_ONE)))); rp.getPeerStorage().removePeer(ID_ONE); rp.removePeer(ID_ONE); assertNumberOfPeers(1); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index ed90863763a7..70a1e703c667 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.SocketAddress; +import java.net.URI; import java.security.PrivilegedExceptionAction; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; @@ -68,6 +69,20 @@ public static AsyncClusterConnection createAsyncClusterConnection(Configuration localAddress, user); } + /** + * Create a new {@link AsyncClusterConnection} instance. + *
    + * This is usually used in replication, the given {@code uri} specifies the connection info of the + * remote cluster. + */ + public static AsyncClusterConnection createAsyncClusterConnection(URI uri, Configuration conf, + SocketAddress localAddress, User user) throws IOException { + ConnectionRegistry registry = uri != null + ? ConnectionRegistryFactory.create(uri, conf, user) + : ConnectionRegistryFactory.create(conf, user); + return createAsyncClusterConnection(conf, registry, localAddress, user); + } + /** * Create a new {@link AsyncClusterConnection} instance to be used at server side where we have a * {@link ConnectionRegistryEndpoint}. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 322b5bb7fc78..ac9491834ae8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -39,11 +39,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistryFactory; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; @@ -402,6 +407,57 @@ public void removeAllQueuesAndHFileRefs(String peerId) throws ReplicationExcepti queueStorage.removePeerFromHFileRefs(peerId); } + private void checkClusterKey(String clusterKey, ReplicationEndpoint endpoint) + throws DoNotRetryIOException { + if (endpoint != null && !(endpoint instanceof HBaseReplicationEndpoint)) { + return; + } + // Endpoints implementing HBaseReplicationEndpoint need to check cluster key + URI connectionUri = ConnectionRegistryFactory.tryParseAsConnectionURI(clusterKey); + try { + if (connectionUri != null) { + ConnectionRegistryFactory.validate(connectionUri); + } else { + ZKConfig.validateClusterKey(clusterKey); + } + } catch (IOException e) { + throw new DoNotRetryIOException("Invalid cluster key: " + clusterKey, e); + } + if (endpoint != null && endpoint.canReplicateToSameCluster()) { + return; + } + // make sure we do not replicate to same cluster + String peerClusterId; + try { + if (connectionUri != null) { + // fetch cluster id through standard admin API + try (Connection conn = ConnectionFactory.createConnection(connectionUri, conf); + Admin admin = conn.getAdmin()) { + peerClusterId = + admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.CLUSTER_ID)).getClusterId(); + } + } else { + // Create the peer cluster config for get peer cluster id + Configuration peerConf = HBaseConfiguration.createClusterConf(conf, clusterKey); + try (ZKWatcher zkWatcher = new ZKWatcher(peerConf, this + "check-peer-cluster-id", null)) { + peerClusterId = ZKClusterId.readClusterIdZNode(zkWatcher); + } + } + } catch (IOException 
| KeeperException e) { + // we just want to check whether we will replicate to the same cluster, so if we get an error + // while getting the cluster id of the peer cluster, it means we are not connecting to + // ourselves, as we are still alive. So here we just log the error and continue + LOG.warn("Can't get peerClusterId for clusterKey=" + clusterKey, e); + return; + } + // In rare case, zookeeper setting may be messed up. That leads to the incorrect + // peerClusterId value, which is the same as the source clusterId + if (clusterId.equals(peerClusterId)) { + throw new DoNotRetryIOException("Invalid cluster key: " + clusterKey + + ", should not replicate to itself for HBaseInterClusterReplicationEndpoint"); + } + } + private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws DoNotRetryIOException { String replicationEndpointImpl = peerConfig.getReplicationEndpointImpl(); ReplicationEndpoint endpoint = null; @@ -416,14 +472,7 @@ private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws DoNotRetry e); } } - // Endpoints implementing HBaseReplicationEndpoint need to check cluster key - if (endpoint == null || endpoint instanceof HBaseReplicationEndpoint) { - checkClusterKey(peerConfig.getClusterKey()); - // Check if endpoint can replicate to the same cluster - if (endpoint == null || !endpoint.canReplicateToSameCluster()) { - checkSameClusterKey(peerConfig.getClusterKey()); - } - } + checkClusterKey(peerConfig.getClusterKey(), endpoint); if (peerConfig.replicateAllUserTables()) { // If replicate_all flag is true, it means all user tables will be replicated to peer cluster. @@ -563,33 +612,6 @@ private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig) } } - private void checkClusterKey(String clusterKey) throws DoNotRetryIOException { - try { - ZKConfig.validateClusterKey(clusterKey); - } catch (IOException e) { - throw new DoNotRetryIOException("Invalid cluster key: " + clusterKey, e); - } - } - - private void checkSameClusterKey(String clusterKey) throws DoNotRetryIOException { - String peerClusterId = ""; - try { - // Create the peer cluster config for get peer cluster id - Configuration peerConf = HBaseConfiguration.createClusterConf(conf, clusterKey); - try (ZKWatcher zkWatcher = new ZKWatcher(peerConf, this + "check-peer-cluster-id", null)) { - peerClusterId = ZKClusterId.readClusterIdZNode(zkWatcher); - } - } catch (IOException | KeeperException e) { - throw new DoNotRetryIOException("Can't get peerClusterId for clusterKey=" + clusterKey, e); - } - // In rare case, zookeeper setting may be messed up. 
That leads to the incorrect - // peerClusterId value, which is the same as the source clusterId - if (clusterId.equals(peerClusterId)) { - throw new DoNotRetryIOException("Invalid cluster key: " + clusterKey - + ", should not replicate to itself for HBaseInterClusterReplicationEndpoint"); - } - } - public List getSerialPeerIdsBelongsTo(TableName tableName) { return peers.values().stream().filter(p -> p.getPeerConfig().isSerial()) .filter(p -> p.getPeerConfig().needToReplicate(tableName)).map(p -> p.getPeerId()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index f0ea993a41ba..b85ce2bad47d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import java.io.IOException; +import java.net.URI; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistryFactory; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.ReservoirSample; @@ -55,6 +57,8 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint protected Configuration conf; + private URI clusterURI; + private final Object connLock = new Object(); private volatile AsyncClusterConnection conn; @@ -82,19 +86,23 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private List sinkServers = new ArrayList<>(0); - /* + /** * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different * Connection implementations, or initialize it in a different way, so defining createConnection * as protected for possible overridings. 
*/ - protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); + protected AsyncClusterConnection createConnection(URI clusterURI, Configuration conf) + throws IOException { + return ClusterConnectionFactory.createAsyncClusterConnection(clusterURI, conf, null, + User.getCurrent()); } @Override public void init(Context context) throws IOException { super.init(context); this.conf = HBaseConfiguration.create(ctx.getConfiguration()); + this.clusterURI = ConnectionRegistryFactory + .tryParseAsConnectionURI(context.getReplicationPeer().getPeerConfig().getClusterKey()); this.ratio = ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); this.badSinkThreshold = @@ -167,7 +175,7 @@ private AsyncClusterConnection connect() throws IOException { if (c != null) { return c; } - c = createConnection(this.conf); + c = createConnection(clusterURI, conf); conn = c; } return c; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index dcdf55a945b5..fd5b7dd729e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -3216,6 +3216,24 @@ public static String safeGetAsStr(List lst, int i) { } } + public String getRpcConnnectionURI() throws UnknownHostException { + return "hbase+rpc://" + MasterRegistry.getMasterAddr(conf); + } + + public String getZkConnectionURI() { + return "hbase+zk://" + conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + } + + /** + * Get the zk based cluster key for this cluster. + * @deprecated since 2.7.0, will be removed in 4.0.0. Now we use connection uri to specify the + * connection info of a cluster. Keep here only for compatibility. 
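+ * For example (hosts are illustrative), the URI based helpers return values of the form
+ * {@code hbase+rpc://master1:16000} or {@code hbase+zk://zk1:2181/hbase}.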
+ * @see #getRpcConnnectionURI() + * @see #getZkConnectionURI() + */ + @Deprecated public String getClusterKey() { return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java index e52d8ee92c3c..61e028705f88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java @@ -79,7 +79,7 @@ public void testReplicationPeerModificationSwitch() throws Exception { assertTrue(ADMIN.replicationPeerModificationSwitch(false)); IOException error = assertThrows(IOException.class, () -> ADMIN.addReplicationPeer("peer", ReplicationPeerConfig - .newBuilder().setClusterKey(TEST_UTIL.getClusterKey() + "-test").build())); + .newBuilder().setClusterKey(TEST_UTIL.getRpcConnnectionURI()).build())); assertThat(error.getCause().getMessage(), containsString("Replication peer modification disabled")); // enable again, and the previous value should be false diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index 157277d83022..f1f47b92c9f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -86,8 +86,8 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0); TEST_UTIL.startMiniCluster(); - KEY_ONE = TEST_UTIL.getClusterKey() + "-test1"; - KEY_TWO = TEST_UTIL.getClusterKey() + "-test2"; + KEY_ONE = TEST_UTIL.getZkConnectionURI() + "-test1"; + KEY_TWO = TEST_UTIL.getZkConnectionURI() + "-test2"; ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java index 18727866bf78..3144f607a542 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java @@ -87,7 +87,7 @@ public static void setUpBeforeClass() throws Exception { admin2 = connection.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL2.getRpcConnnectionURI()).build(); ASYNC_CONN.getAdmin().addReplicationPeer(ID_SECOND, rpc).join(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 3f23364d136c..e02403542a91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -392,7 +392,7 @@ public void testReplicaAndReplication() throws Exception { try (Connection connection = 
ConnectionFactory.createConnection(HTU.getConfiguration()); Admin admin = connection.getAdmin()) { ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(HTU2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(HTU2.getRpcConnnectionURI()).build(); admin.addReplicationPeer("2", rpc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java index 5d0cc33eacb6..daa29908251a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java @@ -79,7 +79,7 @@ public void testAddPeerWithSameTable() throws Exception { Thread[] threads = new Thread[5]; for (int i = 0; i < 5; i++) { String peerId = "id" + i; - String clusterKey = TEST_UTIL.getClusterKey() + "-test" + i; + String clusterKey = TEST_UTIL.getZkConnectionURI() + "-test" + i; int index = i; threads[i] = new Thread(() -> { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index 5aef1eaf1c6b..da1bc04d7e03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -109,8 +109,8 @@ public static void tearDownAfterClass() throws Exception { public void setup() throws ReplicationException, IOException { root = TEST_UTIL.getDataTestDirOnTestFS(); rp.getPeerStorage().addPeer(peerId, - ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true, - SyncReplicationState.NONE); + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getRpcConnnectionURI()).build(), + true, SyncReplicationState.NONE); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDisablePeerModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDisablePeerModification.java index 7b9fa7001559..2ce61cd1b5d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDisablePeerModification.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDisablePeerModification.java @@ -117,7 +117,7 @@ public void testDrainProcs() throws Exception { RESUME = new CountDownLatch(1); AsyncAdmin admin = UTIL.getAsyncConnection().getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL.getClusterKey() + "-test") + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL.getRpcConnnectionURI() + "-test") .setReplicationEndpointImpl(DummyReplicationEndpoint.class.getName()).build(); CompletableFuture addFuture = admin.addReplicationPeer("test_peer_" + async, rpc); ARRIVE.await(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index b711269c5ba7..7ab7578df1c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -25,6 +25,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.net.UnknownHostException; import java.util.List; import java.util.Map; import java.util.Optional; @@ -170,8 +171,9 @@ public void setUpBase() throws Exception { BULK_LOADS_COUNT = new AtomicInteger(0); } - private ReplicationPeerConfig getPeerConfigForCluster(HBaseTestingUtil util) { - return ReplicationPeerConfig.newBuilder().setClusterKey(util.getClusterKey()) + private ReplicationPeerConfig getPeerConfigForCluster(HBaseTestingUtil util) + throws UnknownHostException { + return ReplicationPeerConfig.newBuilder().setClusterKey(util.getRpcConnnectionURI()) .setSerial(isSerialPeer()).build(); } @@ -185,7 +187,7 @@ private void setupCoprocessor(HBaseTestingUtil cluster) { cluster.getConfiguration()); cp = r.getCoprocessorHost() .findCoprocessor(TestBulkLoadReplication.BulkReplicationTestObserver.class); - cp.clusterName = cluster.getClusterKey(); + cp.clusterName = cluster.getRpcConnnectionURI(); } } catch (Exception e) { LOG.error(e.getMessage(), e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java index 787784c8ec40..bfc80232792f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java @@ -161,7 +161,7 @@ public void testWhenExcludeCF() throws Exception { Map> excludeTableCFs = Maps.newHashMap(); excludeTableCFs.put(REPLICATE_TABLE, Lists.newArrayList(Bytes.toString(CF_B))); ReplicationPeerConfig peerConfig = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCFs).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); Assert.assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); @@ -192,7 +192,7 @@ public void testWhenExcludeTable() throws Exception { Map> excludeTableCFs = Maps.newHashMap(); excludeTableCFs.put(NO_REPLICATE_TABLE, null); ReplicationPeerConfig peerConfig = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCFs).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); @@ -223,7 +223,7 @@ public void testWhenExcludeNamespace() throws Exception { // Add peer, setReplicateAllUserTables true, but exclude one namespace. 
ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()).setReplicateAllUserTables(true) + .setClusterKey(UTIL2.getRpcConnnectionURI()).setReplicateAllUserTables(true) .setExcludeNamespaces(Sets.newHashSet(NO_REPLICATE_NAMESPACE)).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index 7f5df02ecfc3..f0caa7a02ba8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -125,12 +125,12 @@ public static void setUp() throws Exception { new Path(UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(), "remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory()); UTIL1.getAdmin().addReplicationPeer(PEER_ID, - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicateAllUserTables(false) .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) .setRemoteWALDir(REMOTE_WAL_DIR2.toUri().toString()).build()); UTIL2.getAdmin().addReplicationPeer(PEER_ID, - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL1.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL1.getRpcConnnectionURI()) .setReplicateAllUserTables(false) .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) .setRemoteWALDir(REMOTE_WAL_DIR1.toUri().toString()).build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java index 95adc8a365cd..058564dc0ecf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java @@ -19,8 +19,10 @@ import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; +import java.net.URI; import java.util.Collection; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -36,8 +38,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -48,22 +48,21 @@ public class TestHBaseReplicationEndpoint { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHBaseReplicationEndpoint.class); - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseReplicationEndpoint.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private HBaseReplicationEndpoint endpoint; @Before public void setUp() throws Exception { - try { - ReplicationEndpoint.Context context = new ReplicationEndpoint.Context(null, - UTIL.getConfiguration(), UTIL.getConfiguration(), null, null, null, null, null, null, null); - endpoint = new DummyHBaseReplicationEndpoint(); - endpoint.init(context); - } catch (Exception e) { - LOG.info("Failed", e); - } + ReplicationPeer 
replicationPeer = mock(ReplicationPeer.class); + ReplicationPeerConfig peerConfig = mock(ReplicationPeerConfig.class); + when(replicationPeer.getPeerConfig()).thenReturn(peerConfig); + when(peerConfig.getClusterKey()).thenReturn("hbase+zk://server1:2181/hbase"); + ReplicationEndpoint.Context context = + new ReplicationEndpoint.Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(), null, + null, null, replicationPeer, null, null, null); + endpoint = new DummyHBaseReplicationEndpoint(); + endpoint.init(context); } @Test @@ -205,7 +204,8 @@ public boolean replicate(ReplicateContext replicateContext) { } @Override - public AsyncClusterConnection createConnection(Configuration conf) throws IOException { + public AsyncClusterConnection createConnection(URI clusterURI, Configuration conf) + throws IOException { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index f4c26a9b4562..c7a8ec7373b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -616,7 +616,7 @@ private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber) try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); Admin admin = conn.getAdmin()) { admin.addReplicationPeer(id, ReplicationPeerConfig.newBuilder() - .setClusterKey(utilities[slaveClusterNumber].getClusterKey()).build()); + .setClusterKey(utilities[slaveClusterNumber].getRpcConnnectionURI()).build()); } } @@ -626,7 +626,7 @@ private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, Admin admin = conn.getAdmin()) { admin.addReplicationPeer(id, ReplicationPeerConfig.newBuilder() - .setClusterKey(utilities[slaveClusterNumber].getClusterKey()) + .setClusterKey(utilities[slaveClusterNumber].getRpcConnnectionURI()) .setReplicateAllUserTables(false) .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)).build()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMigrateRepliationPeerStorageOnline.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMigrateRepliationPeerStorageOnline.java index a824dde42a4a..30dc18f4eb18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMigrateRepliationPeerStorageOnline.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMigrateRepliationPeerStorageOnline.java @@ -64,7 +64,7 @@ public static void tearDown() throws IOException { public void testMigrate() throws Exception { Admin admin = UTIL.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL.getClusterKey() + "-test") + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL.getRpcConnnectionURI() + "-test") .setReplicationEndpointImpl(DummyReplicationEndpoint.class.getName()).build(); admin.addReplicationPeer("1", rpc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index 66386d275b2e..95772ee639b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -142,7 +142,7 @@ public void testMultiSlaveReplication() throws Exception { Table htable3 = utility3.getConnection().getTable(tableName); ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()); + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getRpcConnnectionURI()); admin1.addReplicationPeer("1", rpcBuilder.build()); // put "row" and wait 'til it got around, then delete @@ -159,7 +159,7 @@ public void testMultiSlaveReplication() throws Exception { // after the log was rolled put a new row putAndWait(row3, famName, htable1, htable2); - rpcBuilder.setClusterKey(utility3.getClusterKey()); + rpcBuilder.setClusterKey(utility3.getRpcConnnectionURI()); admin1.addReplicationPeer("2", rpcBuilder.build()); // put a row, check it was replicated to all clusters diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java index 5fc48b2d7298..cd6e39dc65bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java @@ -158,7 +158,7 @@ public void setUpBase() throws Exception { Set namespaces = new HashSet<>(); namespaces.add(NS1); ReplicationPeerConfig rpc4_ns = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL4.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL4.getRpcConnnectionURI()) .setReplicateAllUserTables(false).setNamespaces(namespaces).build(); admin1.addReplicationPeer(PEER4_NS, rpc4_ns); @@ -169,7 +169,7 @@ public void setUpBase() throws Exception { Map> tableCFsMap = new HashMap<>(); tableCFsMap.put(NS2_TABLE, null); ReplicationPeerConfig rpc4_ns_table = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL4.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL4.getRpcConnnectionURI()) .setReplicateAllUserTables(false).setTableCFsMap(tableCFsMap).build(); admin1.addReplicationPeer(PEER4_NS_TABLE, rpc4_ns_table); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index f0df221c0fe8..62c7c8f5af27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -404,7 +404,7 @@ public void testPerTableCFReplication() throws Exception { tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f3"); ReplicationPeerConfig rpc2 = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getRpcConnnectionURI()) .setReplicateAllUserTables(false).setTableCFsMap(tableCFs).build(); replicationAdmin.addReplicationPeer("2", rpc2); @@ -414,7 +414,7 @@ public void testPerTableCFReplication() throws Exception { tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f2"); ReplicationPeerConfig rpc3 = - ReplicationPeerConfig.newBuilder().setClusterKey(utility3.getClusterKey()) + 
ReplicationPeerConfig.newBuilder().setClusterKey(utility3.getRpcConnnectionURI()) .setReplicateAllUserTables(false).setTableCFsMap(tableCFs).build(); replicationAdmin.addReplicationPeer("3", rpc3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 1429c3277371..70a6d73c6202 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -282,10 +282,15 @@ private boolean peerExist(String peerId) throws IOException { return hbaseAdmin.listReplicationPeers().stream().anyMatch(p -> peerId.equals(p.getPeerId())); } + // can be override in tests, in case you need to use zk based uri, or the old style uri + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + return util.getRpcConnnectionURI(); + } + protected final void addPeer(String peerId, TableName tableName) throws Exception { if (!peerExist(peerId)) { ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()).setSerial(isSerialPeer()) + .setClusterKey(getClusterKey(UTIL2)).setSerial(isSerialPeer()) .setReplicationEndpointImpl(ReplicationEndpointTest.class.getName()); if (isSyncPeer()) { FileSystem fs2 = UTIL2.getTestFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java index fa7548e3eccc..1faa25f116f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java @@ -21,6 +21,7 @@ import static org.junit.Assert.fail; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -43,6 +44,13 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase { private static final Logger LOG = LoggerFactory.getLogger(TestReplicationDisableInactivePeer.class); + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + // in this test we will restart the peer cluster, and the master address will be changed, so we + // need to use zk based connection uri + return util.getZkConnectionURI(); + } + /** * Test disabling an inactive peer. Add a peer which is inactive, trying to insert, disable the * peer, then activate the peer and make sure nothing is replicated. 
In Addition, enable the peer diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java index 7137395ef7c2..f9106c7ce23a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java @@ -122,7 +122,7 @@ public void setup() throws Exception { } // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getRpcConnnectionURI()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table createTable(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java index 3b73262980ae..ed42df416e95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java @@ -125,7 +125,7 @@ public void setup() throws Exception { } // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getRpcConnnectionURI()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table createTable(NORMAL_TABLE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index 3d9fa06d2e75..aae2af10264b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -266,7 +266,7 @@ public void testAddAndRemoveClusters() throws Exception { } } ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()).build(); hbaseAdmin.addReplicationPeer(PEER_ID, rpc); Thread.sleep(SLEEP_TIME); rowKey = Bytes.toBytes("do rep"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java index d9a90b57c2a0..c761078dfab3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Put; @@ -42,6 +43,13 @@ public class 
TestReplicationStatusAfterLagging extends TestReplicationBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestReplicationStatusAfterLagging.class); + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + // in this test we will restart the peer cluster, and the master address will be changed, so we + // need to use zk based connection uri + return util.getZkConnectionURI(); + } + @Test public void testReplicationStatusAfterLagging() throws Exception { UTIL2.shutdownMiniHBaseCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java index 43c15787839d..8b61c049be7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java @@ -120,7 +120,7 @@ private void createTable(TableName tableName) throws Exception { public void testEditsStuckBehindDeletedCFs() throws Exception { // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getRpcConnnectionURI()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java index 2a0ccdb9d095..ae720dd4f15c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java @@ -121,7 +121,7 @@ private void createTable(TableName tableName) throws Exception { public void testEditsStuckBehindDroppedTable() throws Exception { // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getRpcConnnectionURI()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java index 442582410581..9455cf567276 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java @@ -118,12 +118,12 @@ final void setupReplication() throws Exception { admin2.createTable(t2SyncupTarget); // Get HTable from Master - Connection conn1 = ConnectionFactory.createConnection(UTIL1.getConfiguration()); + conn1 = ConnectionFactory.createConnection(UTIL1.getConfiguration()); ht1Source = conn1.getTable(TN1); ht2Source = conn1.getTable(TN2); // Get HTable from Peer1 - Connection conn2 = ConnectionFactory.createConnection(UTIL2.getConfiguration()); + conn2 = ConnectionFactory.createConnection(UTIL2.getConfiguration()); ht1TargetAtPeer1 = conn2.getTable(TN1); ht2TargetAtPeer1 = conn2.getTable(TN2); @@ -131,7 +131,7 @@ 
final void setupReplication() throws Exception { * set M-S : Master: utility1 Slave1: utility2 */ ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getZkConnectionURI()).build(); admin1.addReplicationPeer("1", rpc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java index 7128b02f3c63..484206ad8387 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java @@ -144,7 +144,7 @@ public static void setUpBeforeClass() throws Exception { connection1 = ConnectionFactory.createConnection(conf1); replicationAdmin = connection1.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getRpcConnnectionURI()).build(); replicationAdmin.addReplicationPeer("2", rpc); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithWALExtendedAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithWALExtendedAttributes.java index a41d47df64d7..971b0938ccd8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithWALExtendedAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithWALExtendedAttributes.java @@ -145,7 +145,7 @@ public static void setUpBeforeClass() throws Exception { connection1 = ConnectionFactory.createConnection(conf1); replicationAdmin = connection1.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getRpcConnnectionURI()).build(); replicationAdmin.addReplicationPeer("2", rpc); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java index 66720b93606f..6d9e70f851fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.fs.Path; @@ -71,7 +72,7 @@ public void testStandbyKillRegionServer() throws Exception { Thread t = new Thread(() -> { try { List regionServers = - UTIL2.getMiniHBaseCluster().getLiveRegionServerThreads(); + new ArrayList<>(UTIL2.getMiniHBaseCluster().getLiveRegionServerThreads()); LOG.debug("Going to stop {} RSes: [{}]", regionServers.size(), regionServers.stream().map(rst -> rst.getRegionServer().getServerName().getServerName()) .collect(Collectors.joining(", "))); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java index 5941e3cc0286..6b14438c8308 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import java.io.IOException; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -30,7 +31,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -40,12 +40,10 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; @@ -55,6 +53,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; + @Category({ ReplicationTests.class, LargeTests.class }) public class TestGlobalReplicationThrottler { @@ -91,21 +91,21 @@ public static void setUpBeforeClass() throws Exception { utility1 = new HBaseTestingUtil(conf1); utility1.startMiniZKCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster(); - new ZKWatcher(conf1, "cluster1", null, true); + new ZKWatcher(conf1, "cluster1", null, true).close(); conf2 = new Configuration(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); utility2 = new HBaseTestingUtil(conf2); utility2.setZkCluster(miniZK); - new ZKWatcher(conf2, "cluster2", null, true); - - ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build(); + new ZKWatcher(conf2, "cluster2", null, true).close(); utility1.startMiniCluster(); utility2.startMiniCluster(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getRpcConnnectionURI()).build(); + try (Connection connection = ConnectionFactory.createConnection(utility1.getConfiguration()); Admin admin1 = connection.getAdmin()) { admin1.addReplicationPeer("peer1", rpc); @@ -121,11 +121,11 @@ public static void tearDownAfterClass() throws Exception { utility1.shutdownMiniCluster(); } - volatile private boolean testQuotaPass = false; - volatile private boolean testQuotaNonZero = false; + private volatile boolean testQuotaPass = false; + private volatile boolean testQuotaNonZero = false; @Test - public void testQuota() throws IOException { + public void testQuota() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = 
TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder @@ -143,10 +143,8 @@ public void testQuota() throws IOException { testQuotaNonZero = true; } // the reason here doing "numOfPeer + 1" is because by using method addEntryToBatch(), even - // the - // batch size (after added last entry) exceeds quota, it still keeps the last one in the - // batch - // so total used buffer size can be one "replication.total.buffer.quota" larger than + // the batch size (after added last entry) exceeds quota, it still keeps the last one in the + // batch so total used buffer size can be one "replication.total.buffer.quota" larger than // expected if (size > REPLICATION_SOURCE_QUOTA * (numOfPeer + 1)) { // We read logs first then check throttler, so if the buffer quota limiter doesn't @@ -158,35 +156,24 @@ public void testQuota() throws IOException { }); watcher.start(); - try (Table t1 = utility1.getConnection().getTable(tableName); - Table t2 = utility2.getConnection().getTable(tableName)) { + try (Table t1 = utility1.getConnection().getTable(tableName)) { for (int i = 0; i < 50; i++) { Put put = new Put(ROWS[i]); put.addColumn(famName, VALUE, VALUE); t1.put(put); } - long start = EnvironmentEdgeManager.currentTime(); - while (EnvironmentEdgeManager.currentTime() - start < 180000) { - Scan scan = new Scan(); - scan.setCaching(50); - int count = 0; - try (ResultScanner results = t2.getScanner(scan)) { - for (Result result : results) { - count++; - } - } - if (count < 50) { - LOG.info("Waiting all logs pushed to slave. Expected 50 , actual " + count); - Threads.sleep(200); - continue; - } - break; - } } + utility2.waitFor(180000, () -> { + try (Table t2 = utility2.getConnection().getTable(tableName); + ResultScanner results = t2.getScanner(new Scan().setCaching(50))) { + int count = Iterables.size(results); + return count >= 50; + } + }); watcher.interrupt(); - Assert.assertTrue(testQuotaPass); - Assert.assertTrue(testQuotaNonZero); + assertTrue(testQuotaPass); + assertTrue(testQuotaNonZero); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java index d6de3dc7a02f..cdef7de2076b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java @@ -80,6 +80,7 @@ public static void setUpBeforeClass() throws Exception { ReplicationPeerConfig rpc = mock(ReplicationPeerConfig.class); when(rpc.isSerial()).thenReturn(false); when(replicationPeer.getPeerConfig()).thenReturn(rpc); + when(rpc.getClusterKey()).thenReturn("hbase+zk://localhost:2181"); Context context = new Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(), null, null, null, replicationPeer, null, null, null); endpoint = new HBaseInterClusterReplicationEndpoint(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMarker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMarker.java index 79487ab309e6..1466f5f5a03e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMarker.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMarker.java @@ -105,7 +105,7 @@ public static void setUpBeforeClass() throws Exception { utility1.startMiniCluster(1); Admin admin1 = utility1.getAdmin(); ReplicationPeerConfigBuilder rpcBuilder = ReplicationPeerConfig.newBuilder(); - rpcBuilder.setClusterKey(utility2.getClusterKey()); + rpcBuilder.setClusterKey(utility2.getRpcConnnectionURI()); admin1.addReplicationPeer("1", rpcBuilder.build()); ReplicationSourceManager manager = utility1.getHBaseCluster().getRegionServer(0) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 53996c376647..05b268d3a0a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -337,8 +337,8 @@ public void testServerShutdownRecoveredQueue() throws Exception { final Admin admin = TEST_UTIL.getAdmin(); final String peerId = "TestPeer"; - admin.addReplicationPeer(peerId, - ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL_PEER.getClusterKey()).build()); + admin.addReplicationPeer(peerId, ReplicationPeerConfig.newBuilder() + .setClusterKey(TEST_UTIL_PEER.getRpcConnnectionURI()).build()); // Wait for replication sources to come up Waiter.waitFor(conf, 20000, new Waiter.Predicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index c48755fb5f0d..979db712ef34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -71,7 +71,7 @@ public void testReplicatorBatching() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorBatching", - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); ReplicationEndpointForTest.setBatchCount(0); @@ -120,7 +120,7 @@ public void testReplicatorWithErrors() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorWithErrors", - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName()) .build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index ec76386046a7..091991b44997 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java 
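As a sketch of the pattern the replication tests above now follow (the class name here is hypothetical; the hook and URI methods are the ones introduced in this patch), a test that restarts its peer cluster overrides the new getClusterKey hook so the peer stays on a ZooKeeper based connection URI instead of the default rpc based one:

public class TestReplicationAfterPeerRestart extends TestReplicationBase { // hypothetical test name

  @Override
  protected String getClusterKey(HBaseTestingUtil util) throws Exception {
    // The rpc based URI (hbase+rpc://<master-address>) goes stale once the peer
    // cluster restarts on a different master port, so pin the peer to the
    // ZooKeeper based URI, which survives restarts.
    return util.getZkConnectionURI();
  }
}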
@@ -137,7 +137,7 @@ public void setup() throws Exception { admin = TEST_UTIL.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getRpcConnnectionURI()).build(); admin.addReplicationPeer("2", rpc); TableDescriptor tableDescriptor = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index ff586e2b682d..dc313d414ae8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -189,7 +189,7 @@ public void setup() throws Exception { admin = TEST_UTIL.getAdmin(); ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getClusterKey()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getRpcConnnectionURI()).build(); admin.addReplicationPeer("2", rpc); Admin hBaseAdmin = TEST_UTIL.getAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java index 20ed3796dbd9..c8e96383492a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java @@ -182,7 +182,7 @@ public void testCleanReplicationBarrierWithExistTable() throws Exception { public static void createPeer() throws IOException { ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL.getClusterKey() + "-test").setSerial(true).build(); + .setClusterKey(UTIL.getZkConnectionURI() + "-test").setSerial(true).build(); UTIL.getAdmin().addReplicationPeer(PEER_1, rpc); UTIL.getAdmin().addReplicationPeer(PEER_2, rpc); } diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 8cdf2719db93..a0453d7e5f38 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -3528,6 +3528,24 @@ public static String safeGetAsStr(List lst, int i) { } } + public String getRpcConnnectionURI() throws UnknownHostException { + return "hbase+rpc://" + MasterRegistry.getMasterAddr(conf); + } + + public String getZkConnectionURI() { + return "hbase+zk://" + conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + } + + /** + * Get the zk based cluster key for this cluster. + * @deprecated since 2.7.0, will be removed in 4.0.0. Now we use connection uri to specify the + * connection info of a cluster. Keep here only for compatibility. 
+ * @see #getRpcConnnectionURI() + * @see #getZkConnectionURI() + */ + @Deprecated public String getClusterKey() { return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" From 2130e5410f63fead548d14c11d3b439e0d98f603 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 25 May 2024 20:55:21 +0800 Subject: [PATCH 384/514] HBASE-28473 Add 2.4.18 to download page (#5945) Signed-off-by: Xin Sun --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 132dc4927fb6..88cb7ac8d7f0 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -120,24 +120,24 @@ under the License. - 2.4.17 + 2.4.18 - 2023/04/06 + 2024/05/25 - 2.4.17 vs 2.4.16 + 2.4.18 vs 2.4.17 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From d1d8b4d64591de6cee302d648be257087c0beb48 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 28 May 2024 10:21:24 +0200 Subject: [PATCH 385/514] HBASE-28613 Use streaming when marshalling protobuf REST output (#5943) Signed-off-by: Ankit Singhal --- .../hbase/rest/ProtobufMessageHandler.java | 43 ++++++++++++++++++- .../hadoop/hbase/rest/model/CellModel.java | 5 ++- .../hadoop/hbase/rest/model/CellSetModel.java | 5 ++- .../rest/model/NamespacesInstanceModel.java | 6 ++- .../hbase/rest/model/NamespacesModel.java | 6 ++- .../hadoop/hbase/rest/model/RowModel.java | 4 +- .../hadoop/hbase/rest/model/ScannerModel.java | 5 ++- .../rest/model/StorageClusterStatusModel.java | 5 ++- .../hbase/rest/model/TableInfoModel.java | 5 ++- .../hbase/rest/model/TableListModel.java | 6 ++- .../hbase/rest/model/TableSchemaModel.java | 6 ++- .../hadoop/hbase/rest/model/VersionModel.java | 5 ++- .../producer/ProtobufMessageBodyProducer.java | 2 +- 13 files changed, 79 insertions(+), 24 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index 2e01ff24d477..962e5dfae860 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -18,21 +18,60 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; +import java.io.OutputStream; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + /** * Common interface for models capable of supporting protobuf marshalling and unmarshalling. Hooks * up to the ProtobufMessageBodyConsumer and ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private public interface ProtobufMessageHandler { - /** Returns the protobuf represention of the model */ - byte[] createProtobufOutput(); + + // The Jetty 9.4 HttpOutput default commit size is 32K/4 = 8K. We use that size to avoid + // double buffering (and copying) in HttpOutput. If we ever increase the HttpOutput commit size, + // we need to adjust this accordingly. We should also revisit this when Jetty is upgraded. + int BUFFER_SIZE = 8 * 1024; + + /** Writes the protobuf represention of the model to os */ + default void writeProtobufOutput(OutputStream os) throws IOException { + // Creating an explicit CodedOutputStream for the following reasons : + // 1. This avoids the cost of pre-computing the message size + // 2. This lets us set the buffer size explicitly + CodedOutputStream cos = CodedOutputStream.newInstance(os, BUFFER_SIZE); + messageFromObject().writeTo(cos); + cos.flush(); + } + + /** + * Returns the protobuf represention of the model in a byte array Use + * {@link org.apache.hadoop.hbase.rest.ProtobufMessageHandler#writeProtobufOutput(OutputStream)} + * for better performance + * @return the protobuf encoded object in a byte array + */ + default byte[] createProtobufOutput() { + return messageFromObject().toByteArray(); + } + + /** + * Convert to model to a protobuf Message object + * @return the protobuf Message object + */ + Message messageFromObject(); /** * Initialize the model from a protobuf representation. 
* @param message the raw bytes of the protobuf message * @return reference to self for convenience */ + // TODO implement proper stream handling for unmarshalling. + // Using byte array here lets us use ProtobufUtil.mergeFrom in the implementations to + // avoid the CodedOutputStream size limitation, but is slow + // and memory intensive. We could use the ProtobufUtil.mergeFrom() variant that takes + // an inputStream and sets the size limit to maxInt. + // This would help both on the client side, and when processing large Puts on the server. ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index 4284727e4380..3d8806b7dc00 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -202,7 +203,7 @@ public int getValueLength() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { Cell.Builder builder = Cell.newBuilder(); builder.setColumn(UnsafeByteOperations.unsafeWrap(getColumn())); if (valueLength == MAGIC_LENGTH) { @@ -213,7 +214,7 @@ public byte[] createProtobufOutput() { if (hasUserTimestamp()) { builder.setTimestamp(getTimestamp()); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index 8908ec7e6c88..8486be2762fe 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -108,7 +109,7 @@ public List getRows() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { CellSet.Builder builder = CellSet.newBuilder(); for (RowModel row : getRows()) { CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder(); @@ -134,7 +135,7 @@ public byte[] createProtobufOutput() { } builder.addRows(rowBuilder); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index 64b46f2956c8..78f647203851 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import 
org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; /** @@ -140,7 +142,7 @@ public String toString() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); if (properties != null) { for (Map.Entry entry : properties.entrySet()) { @@ -151,7 +153,7 @@ public byte[] createProtobufOutput() { builder.addProps(property); } } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index e866c7a935d1..90e4f6560a51 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces; /** @@ -95,10 +97,10 @@ public String toString() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { Namespaces.Builder builder = Namespaces.newBuilder(); builder.addAllNamespace(namespaces); - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index 8b660ac362fc..e200dfbc1f35 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + /** * Representation of a row. A row is a related set of cells, grouped by common row key. RowModels do * not appear in results by themselves. They are always encapsulated within CellSetModels. 
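To show what the new contract amounts to, here is a minimal sketch (ExampleModel and ExampleProto are placeholders, not part of the patch; the interface methods are the ones shown above): a model now only builds its protobuf Message, and the default writeProtobufOutput streams it to the response through a CodedOutputStream sized at BUFFER_SIZE (8 KB).

public class ExampleModel implements ProtobufMessageHandler { // hypothetical model

  private String value;

  @Override
  public Message messageFromObject() {
    // Build the protobuf message only; no toByteArray(), so the full serialized
    // form is never materialized in memory before reaching the output stream.
    return ExampleProto.newBuilder().setValue(value).build(); // ExampleProto is a placeholder type
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
    this.value = ExampleProto.parseFrom(message).getValue();
    return this;
  }
}

// The JAX-RS producer then calls model.writeProtobufOutput(entityStream),
// flushing through CodedOutputStream in 8 KB chunks rather than one large byte array.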
@@ -179,7 +181,7 @@ public List getCells() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { // there is no standalone row protobuf message throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 3655a3798041..4c241753e5e8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -71,6 +71,7 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; @@ -809,7 +810,7 @@ public void setFilter(String filter) { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { Scanner.Builder builder = Scanner.newBuilder(); if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) { builder.setStartRow(UnsafeByteOperations.unsafeWrap(startRow)); @@ -842,7 +843,7 @@ public byte[] createProtobufOutput() { builder.addLabels(label); } builder.setCacheBlocks(cacheBlocks); - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index e0102811142a..c9370cad901b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -672,7 +673,7 @@ public String toString() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); builder.setRegions(regions); builder.setRequests(requests); @@ -708,7 +709,7 @@ public byte[] createProtobufOutput() { for (String node : deadNodes) { builder.addDeadNodes(node); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index 74d0732ec918..43b131fcb701 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -123,7 +124,7 @@ public String toString() { } @Override - 
public byte[] createProtobufOutput() { + public Message messageFromObject() { TableInfo.Builder builder = TableInfo.newBuilder(); builder.setName(name); for (TableRegionModel aRegion : regions) { @@ -135,7 +136,7 @@ public byte[] createProtobufOutput() { regionBuilder.setLocation(aRegion.getLocation()); builder.addRegions(regionBuilder); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index 76854acdf6ae..63b2e809279c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList; @@ -90,12 +92,12 @@ public String toString() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { TableList.Builder builder = TableList.newBuilder(); for (TableModel aTable : tables) { builder.addName(aTable.getName()); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index 06abe355859a..f2a8c4c7060d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableSchemaMessage.TableSchema; @@ -248,7 +250,7 @@ public void __setReadOnly(boolean value) { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { TableSchema.Builder builder = TableSchema.newBuilder(); builder.setName(name); for (Map.Entry e : attrs.entrySet()) { @@ -281,7 +283,7 @@ public byte[] createProtobufOutput() { if (attrs.containsKey(READONLY)) { builder.setReadOnly(Boolean.parseBoolean(attrs.get(READONLY).toString())); } - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java index e5d79af5e55c..65eca57ac5a3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.rest.RESTServlet; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ 
-162,14 +163,14 @@ public String toString() { } @Override - public byte[] createProtobufOutput() { + public Message messageFromObject() { Version.Builder builder = Version.newBuilder(); builder.setRestVersion(restVersion); builder.setJvmVersion(jvmVersion); builder.setOsVersion(osVersion); builder.setServerVersion(serverVersion); builder.setJerseyVersion(jerseyVersion); - return builder.build().toByteArray(); + return builder.build(); } @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java index 1d95e6f343e7..4a7806e652b1 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java @@ -59,6 +59,6 @@ public long getSize(ProtobufMessageHandler m, Class type, Type genericType, public void writeTo(ProtobufMessageHandler m, Class type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, OutputStream entityStream) throws IOException, WebApplicationException { - entityStream.write(m.createProtobufOutput()); + m.writeProtobufOutput(entityStream); } } From 826fb411c8106108577952f7e33abfc8474a62a5 Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Tue, 28 May 2024 22:57:53 +0800 Subject: [PATCH 386/514] HBASE-28588 Remove deprecated methods in WAL (#5893) Signed-off-by: Duo Zhang --- .../hbase/regionserver/wal/AbstractFSWAL.java | 13 +++--- .../wal/SequenceIdAccounting.java | 7 ++- .../hadoop/hbase/wal/DisabledWALProvider.java | 5 --- .../java/org/apache/hadoop/hbase/wal/WAL.java | 10 ----- .../TestPerColumnFamilyFlush.java | 18 ++++---- .../TestWalAndCompactingMemStoreFlush.java | 45 ++++++++++--------- .../regionserver/wal/AbstractTestFSWAL.java | 12 ++++- .../hbase/regionserver/wal/TestFSHLog.java | 2 +- 8 files changed, 57 insertions(+), 55 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 7a057ca7c7b6..5f06b04cdf92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -27,6 +27,7 @@ import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import com.google.errorprone.annotations.RestrictedApi; import com.lmax.disruptor.RingBuffer; import com.lmax.disruptor.Sequence; import com.lmax.disruptor.Sequencer; @@ -686,12 +687,6 @@ public void abortCacheFlush(byte[] encodedRegionName) { this.sequenceIdAccounting.abortCacheFlush(encodedRegionName); } - @Override - public long getEarliestMemStoreSeqNum(byte[] encodedRegionName) { - // Used by tests. Deprecated as too subtle for general usage. 
- return this.sequenceIdAccounting.getLowestSequenceId(encodedRegionName); - } - @Override public long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName) { // This method is used by tests and for figuring if we should flush or not because our @@ -730,6 +725,12 @@ public final void sync(long txid, boolean forceSync) throws IOException { TraceUtil.trace(() -> doSync(txid, forceSync), () -> createSpan("WAL.sync")); } + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public SequenceIdAccounting getSequenceIdAccounting() { + return sequenceIdAccounting; + } + /** * This is a convenience method that computes a new filename with a given file-number. * @param filenum to use diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 14d6a9739393..1073d841ba4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; +import com.google.errorprone.annotations.RestrictedApi; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -50,7 +51,7 @@ *

    */ @InterfaceAudience.Private -class SequenceIdAccounting { +public class SequenceIdAccounting { private static final Logger LOG = LoggerFactory.getLogger(SequenceIdAccounting.class); /** @@ -112,7 +113,9 @@ class SequenceIdAccounting { * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will return * {@link HConstants#NO_SEQNUM} when none. */ - long getLowestSequenceId(final byte[] encodedRegionName) { + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public long getLowestSequenceId(final byte[] encodedRegionName) { synchronized (this.tieLock) { Map m = this.flushingSequenceIds.get(encodedRegionName); long flushingLowest = m != null ? getLowestSequenceId(m) : Long.MAX_VALUE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 4700ecdea8e8..ea071e5ca22a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -237,11 +237,6 @@ public WALCoprocessorHost getCoprocessorHost() { return coprocessorHost; } - @Override - public long getEarliestMemStoreSeqNum(byte[] encodedRegionName) { - return HConstants.NO_SEQNUM; - } - @Override public long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName) { return HConstants.NO_SEQNUM; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 2bdb1e41eb5f..d9090f923c8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -212,16 +212,6 @@ default void sync(long txid, boolean forceSync) throws IOException { /** Returns Coprocessor host. */ WALCoprocessorHost getCoprocessorHost(); - /** - * Gets the earliest unflushed sequence id in the memstore for the region. - * @param encodedRegionName The region to get the number for. - * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. - * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal - * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} - */ - @Deprecated - long getEarliestMemStoreSeqNum(byte[] encodedRegionName); - /** * Gets the earliest unflushed sequence id in the memstore for the store. * @param encodedRegionName The region to get the number for. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 344d445ceb3a..c015e630f1c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -162,8 +163,8 @@ public void testSelectiveFlushWhenEnabled() throws IOException { MemStoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstore = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstore = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); // The overall smallest LSN in the region's memstores should be the same as // the LSN of the smallest edit in CF1 @@ -193,8 +194,8 @@ public void testSelectiveFlushWhenEnabled() throws IOException { cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize = region.getMemStoreDataSize(); - smallestSeqInRegionCurrentMemstore = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + smallestSeqInRegionCurrentMemstore = AbstractTestFSWAL.getEarliestMemStoreSeqNum(getWAL(region), + region.getRegionInfo().getEncodedNameAsBytes()); // We should have cleared out only CF1, since we chose the flush thresholds // and number of puts accordingly. @@ -231,8 +232,8 @@ public void testSelectiveFlushWhenEnabled() throws IOException { cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize = region.getMemStoreDataSize(); - smallestSeqInRegionCurrentMemstore = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + smallestSeqInRegionCurrentMemstore = AbstractTestFSWAL.getEarliestMemStoreSeqNum(getWAL(region), + region.getRegionInfo().getEncodedNameAsBytes()); // CF1 and CF2, both should be absent. assertEquals(0, cf1MemstoreSize.getDataSize()); @@ -242,6 +243,7 @@ public void testSelectiveFlushWhenEnabled() throws IOException { // CF3 shouldn't have been touched. assertEquals(cf3MemstoreSize, oldCF3MemstoreSize); assertEquals(totalMemstoreSize, cf3MemstoreSize.getDataSize()); + assertEquals(smallestSeqInRegionCurrentMemstore, smallestSeqCF3); // What happens when we hit the memstore limit, but we are not able to find // any Column Family above the threshold? 
@@ -313,8 +315,8 @@ public void testSelectiveFlushWhenNotEnabled() throws IOException { cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize = region.getMemStoreDataSize(); - long smallestSeqInRegionCurrentMemstore = - region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstore = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(region.getWAL(), region.getRegionInfo().getEncodedNameAsBytes()); // Everything should have been cleared assertEquals(0, cf1MemstoreSize.getDataSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 99afc433ad42..32944a4147fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -177,8 +178,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstorePhaseI = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseI = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); String s = "\n\n----------------------------------\n" + "Upon initial insert and before any flush, size of CF1 is:" + cf1MemstoreSizePhaseI @@ -224,8 +225,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseII = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseII = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); @@ -280,8 +281,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseIV = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIV = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); @@ -318,8 +319,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseV = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseV = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(0, cf1MemstoreSizePhaseV.getDataSize()); assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSizePhaseV.getHeapSize()); @@ -405,8 +406,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstorePhaseI = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseI = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); /*------------------------------------------------------------------------------*/ /* PHASE I - validation */ @@ -458,8 +459,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseII = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseII = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); long totalMemstoreSizePhaseII = region.getMemStoreDataSize(); @@ -531,8 +532,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseIV = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIV = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); /*------------------------------------------------------------------------------*/ @@ -563,8 +564,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseV = - getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseV = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(getWAL(region), region.getRegionInfo().getEncodedNameAsBytes()); long totalMemstoreSizePhaseV = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ @@ -683,8 +684,8 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { MemStoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseII = - region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseII = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(region.getWAL(), region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); @@ -713,8 +714,8 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { region.put(createPut(2, i)); } - long smallestSeqInRegionCurrentMemstorePhaseIII = - region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIII = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(region.getWAL(), region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseIII = region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3PhaseIII = region.getOldestSeqIdOfStore(FAMILY3); @@ -731,8 +732,8 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { cms3.flushInMemory(); region.flush(false); - long smallestSeqInRegionCurrentMemstorePhaseIV = - region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIV = AbstractTestFSWAL + .getEarliestMemStoreSeqNum(region.getWAL(), region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF1PhaseIV = 
region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index ca433a8ef717..fd80f6cceae4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -624,7 +624,7 @@ public void testUnflushedSeqIdTrackingWithAsyncWal() throws IOException, Interru }, startHoldingForAppend, closeFinished, holdAppend); // now check the region's unflushed seqIds. - long seqId = wal.getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long seqId = getEarliestMemStoreSeqNum(wal, region.getRegionInfo().getEncodedNameAsBytes()); assertEquals("Found seqId for the region which is already closed", HConstants.NO_SEQNUM, seqId); } finally { @@ -634,6 +634,16 @@ public void testUnflushedSeqIdTrackingWithAsyncWal() throws IOException, Interru } } + public static long getEarliestMemStoreSeqNum(WAL wal, byte[] encodedRegionName) { + if (wal != null) { + if (wal instanceof AbstractFSWAL) { + return ((AbstractFSWAL) wal).getSequenceIdAccounting() + .getLowestSequenceId(encodedRegionName); + } + } + return HConstants.NO_SEQNUM; + } + private static final Set STORES_TO_FLUSH = Collections.newSetFromMap(new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index 07a97a1e0e97..ec993b897684 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -212,7 +212,7 @@ public void run() { assertEquals("Region did not flush?", 1, region.getStoreFileList(new byte[][] { b }).size()); // now check the region's unflushed seqIds. 
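Note on the pattern above, which the TestFSHLog hunk right below repeats: every test lookup of the lowest unflushed sequence id now goes through the new static helper in AbstractTestFSWAL instead of calling getEarliestMemStoreSeqNum on the WAL itself. A minimal usage sketch, assuming a test that already holds an HRegion backed by an AbstractFSWAL (FSHLog or AsyncFSWAL); the variable names follow the tests in this patch:

    WAL wal = region.getWAL();
    byte[] encodedName = region.getRegionInfo().getEncodedNameAsBytes();

    // Replaces wal.getEarliestMemStoreSeqNum(encodedName) in test code.
    long earliest = AbstractTestFSWAL.getEarliestMemStoreSeqNum(wal, encodedName);

    // Per the helper above, HConstants.NO_SEQNUM comes back when the WAL is null or is
    // not an AbstractFSWAL; otherwise the value is the lowest outstanding sequence id
    // tracked by the WAL's SequenceIdAccounting.
    if (earliest == HConstants.NO_SEQNUM) {
      // nothing unflushed is tracked for this region
    }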
- long seqId = log.getEarliestMemStoreSeqNum(hri.getEncodedNameAsBytes()); + long seqId = AbstractTestFSWAL.getEarliestMemStoreSeqNum(log, hri.getEncodedNameAsBytes()); assertEquals("Found seqId for the region which is already flushed", HConstants.NO_SEQNUM, seqId); From 771e1d92c0a342c4511d69d2ece4d7d58cd47d45 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 29 May 2024 15:21:18 +0800 Subject: [PATCH 387/514] HBASE-28616 Remove/Deprecated the rs.* related configuration in TableOutputFormat (#5946) Signed-off-by: Andrew Purtell Signed-off-by: Pankaj Kumar Reviewed-by: Subrat Mishra --- ...rationTestBigLinkedListWithVisibility.java | 2 +- .../hadoop/hbase/mapreduce/CopyTable.java | 3 +- .../hadoop/hbase/mapreduce/SyncTable.java | 3 +- .../hbase/mapreduce/TableMapReduceUtil.java | 87 +++++++++++++++---- .../hbase/mapreduce/TableOutputFormat.java | 22 +++-- 5 files changed, 87 insertions(+), 30 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index 38c91fcb37c3..25640ed294d5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -275,7 +275,7 @@ public int runCopier(String outputDir) throws Exception { } job.getConfiguration().setBoolean("mapreduce.map.speculative", false); job.getConfiguration().setBoolean("mapreduce.reduce.speculative", false); - TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job, null, null, null, null); + TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), AbstractHBaseTool.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index 273271b1867b..8564c105331e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -181,8 +181,7 @@ public Job createSubmittableJob(String[] args) throws IOException { } } else { initCopyTableMapperReducerJob(job, scan); - TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress, null, - null); + TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress); } return job; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index b8b0dceea381..c1cf132d0302 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -167,8 +167,7 @@ public Job createSubmittableJob(String[] args) throws IOException { } else { // No reducers. Just write straight to table. Call initTableReducerJob // because it sets up the TableOutputFormat. 
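The TableMapReduceUtil call-site updates in this patch (IntegrationTestBigLinkedListWithVisibility, CopyTable, and the SyncTable line just below) all make the same move: drop the trailing serverClass/serverImpl arguments and use the shorter overloads. A before/after sketch taken from those call sites (targetTableName, targetZkCluster, COMMON_TABLE_NAME and job are the variables used there):

    // Before: 7-argument overload with null serverClass/serverImpl.
    TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster,
      null, null);

    // After: 5-argument overload; the dropped parameters had no effect anyway.
    TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster);

    // With no partitioner and no remote cluster, the 3-argument form is enough.
    TableMapReduceUtil.initTableReducerJob(COMMON_TABLE_NAME, null, job);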
- TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster, - null, null); + TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster); // would be nice to add an option for bulk load instead } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index f189767a7c76..a23393ff804c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -549,7 +549,7 @@ public static void initTableReducerJob(String table, Class reducer, Job job, Class partitioner) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, null, null, null); + initTableReducerJob(table, reducer, job, partitioner, null); } /** @@ -569,15 +569,11 @@ public static void initTableReducerJob(String table, Class <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl * @throws IOException When determining the region count fails. */ public static void initTableReducerJob(String table, Class reducer, - Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) - throws IOException { - initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl, - true); + Job job, Class partitioner, String quorumAddress) throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, true); } /** @@ -597,16 +593,13 @@ public static void initTableReducerJob(String table, Class <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. - * @param serverClass redefined hbase.regionserver.class - * @param serverImpl redefined hbase.regionserver.impl * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. */ public static void initTableReducerJob(String table, Class reducer, - Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, - boolean addDependencyJars) throws IOException { - + Job job, Class partitioner, String quorumAddress, boolean addDependencyJars) + throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); job.setOutputFormatClass(TableOutputFormat.class); @@ -620,10 +613,6 @@ public static void initTableReducerJob(String table, Classnull to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the + * zookeeper ensemble of an alternate remote cluster when you would have the + * reduce write a cluster that is other than the default; e.g. copying tables + * between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble address + * of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< + * hbase.zookeeper.client.port>:<zookeeper.znode.parent> + * such as server,server2,server3:2181:/hbase. 
+ * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl + * @throws IOException When determining the region count fails. + * @deprecated Since 2.5.9, 2.6.1, 2.7.0, will be removed in 4.0.0. The {@code serverClass} and + * {@code serverImpl} do not take effect any more, just use + * {@link #initTableReducerJob(String, Class, Job, Class, String)} instead. + * @see #initTableReducerJob(String, Class, Job, Class, String) + */ + @Deprecated + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) + throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress); + } + + /** + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to + * the zookeeper ensemble of an alternate remote cluster when you would + * have the reduce write a cluster that is other than the default; e.g. + * copying tables between clusters, the source would be designated by + * hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< + * hbase.zookeeper.client.port>:<zookeeper.znode.parent> + * such as server,server2,server3:2181:/hbase. + * @param serverClass redefined hbase.regionserver.class + * @param serverImpl redefined hbase.regionserver.impl + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @throws IOException When determining the region count fails. + * @deprecated Since 2.5.9, 2.6.1, 2.7.0, will be removed in 4.0.0. The {@code serverClass} and + * {@code serverImpl} do not take effect any more, just use + * {@link #initTableReducerJob(String, Class, Job, Class, String, boolean)} instead. + * @see #initTableReducerJob(String, Class, Job, Class, String, boolean) + */ + @Deprecated + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, + boolean addDependencyJars) throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, addDependencyJars); + } + /** * Ensures that the given number of reduce tasks for the given job configuration does not exceed * the number of regions for the given table. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 17c6c0e45511..a8ec67c9b237 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -66,16 +66,26 @@ public class TableOutputFormat extends OutputFormat implemen * Optional job parameter to specify a peer cluster. Used specifying remote cluster when copying * between hbase clusters (the source is picked up from hbase-site.xml). 
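For callers migrating off the deprecated overloads above, the only remote-cluster detail that still matters is the optional quorum address; serverClass and serverImpl are ignored and simply dropped by the delegating methods. A hedged sketch of the recommended call, where "my_table", MyReducer and the ensemble string are placeholders and HRegionPartitioner is the stock HBase partitioner:

    // Write the reduce output to a remote cluster identified only by its ensemble,
    // in the format described by the javadoc above.
    String peerQuorum = "server,server2,server3:2181:/hbase";
    TableMapReduceUtil.initTableReducerJob("my_table", MyReducer.class, job,
      HRegionPartitioner.class, peerQuorum);

    // Same, but without shipping dependency jars through the distributed cache.
    TableMapReduceUtil.initTableReducerJob("my_table", MyReducer.class, job,
      HRegionPartitioner.class, peerQuorum, false);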
* @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, - * Class, String, String, String) + * Class, String) */ public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; /** Optional job parameter to specify peer cluster's ZK client port */ public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; - /** Optional specification of the rs class name of the peer cluster */ + /** + * Optional specification of the rs class name of the peer cluster. + * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. Does not take effect from + * long ago, see HBASE-6044. + */ + @Deprecated public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; - /** Optional specification of the rs impl name of the peer cluster */ + /** + * Optional specification of the rs impl name of the peer cluster + * @deprecated Since 2.5.9, 2.6.1 and 2.7.0, will be removed in 4.0.0. Does not take effect from + * long ago, see HBASE-6044. + */ + @Deprecated public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; /** The configuration. */ @@ -208,15 +218,9 @@ public void setConf(Configuration otherConf) { String address = otherConf.get(QUORUM_ADDRESS); int zkClientPort = otherConf.getInt(QUORUM_PORT, 0); - String serverClass = otherConf.get(REGION_SERVER_CLASS); - String serverImpl = otherConf.get(REGION_SERVER_IMPL); try { this.conf = HBaseConfiguration.createClusterConf(otherConf, address, OUTPUT_CONF_PREFIX); - - if (serverClass != null) { - this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl); - } if (zkClientPort != 0) { this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort); } From 0d2edbc0f602916d8b5d931f8180ca992a7a8a35 Mon Sep 17 00:00:00 2001 From: Andor Molnar Date: Wed, 29 May 2024 14:33:16 +0200 Subject: [PATCH 388/514] Add andor to the developer list --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 7d23580698f8..a34fe50d7059 100644 --- a/pom.xml +++ b/pom.xml @@ -76,6 +76,12 @@ allan163@apache.org +8 + + andor + Andor Molnar + andor@apache.org + -5 + appy Apekshit Sharma From c4a7606479a847ffaada470c4bbae1d5f018ea02 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 29 May 2024 22:52:48 +0800 Subject: [PATCH 389/514] HBASE-28582 ModifyTableProcedure should not reset TRSP on region node when closing unused region replicas (#5903) Signed-off-by: Viraj Jasani --- .../server/master/MasterProcedure.proto | 10 ++ .../master/assignment/AssignmentManager.java | 56 +++++- .../CloseExcessRegionReplicasProcedure.java | 159 ++++++++++++++++++ .../procedure/ModifyTableProcedure.java | 3 +- ...educeExcessRegionReplicasBlockedByRIT.java | 107 ++++++++++++ .../TestRegionReplicasWithModifyTable.java | 2 +- 6 files changed, 327 insertions(+), 10 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReduceExcessRegionReplicasBlockedByRIT.java diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index c9c9c6357312..48d20b6bef27 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -780,3 +780,13 @@ enum MigrateNamespaceTableProcedureState { 
message MigrateNamespaceTableProcedureStateData { } + +enum CloseExcessRegionReplicasProcedureState { + CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE = 1; + CLOSE_EXCESS_REGION_REPLICAS_CONFIRM = 2; +} + +message CloseExcessRegionReplicasProcedureStateData { + required TableName table_name = 1; + required uint32 new_replica_count = 2; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 9cee9f87ce2f..bcfa50fe66d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; @@ -1084,14 +1085,55 @@ public TransitRegionStateProcedure[] createUnassignProceduresForDisabling(TableN } /** - * Called by ModifyTableProcedures to unassign all the excess region replicas for a table. + * Called by ModifyTableProcedure to unassign all the excess region replicas for a table. Will + * skip submit unassign procedure if the region is in transition, so you may need to call this + * method multiple times. + * @param tableName the table for closing excess region replicas + * @param newReplicaCount the new replica count, should be less than current replica count + * @param submit for submitting procedure + * @return the number of regions in transition that we can not schedule unassign procedures */ - public TransitRegionStateProcedure[] createUnassignProceduresForClosingExcessRegionReplicas( - TableName tableName, int newReplicaCount) { - return regionStates.getTableRegionStateNodes(tableName).stream() - .filter(regionNode -> regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) - .map(this::forceCreateUnssignProcedure).filter(p -> p != null) - .toArray(TransitRegionStateProcedure[]::new); + public int submitUnassignProcedureForClosingExcessRegionReplicas(TableName tableName, + int newReplicaCount, Consumer submit) { + int inTransitionCount = 0; + for (RegionStateNode regionNode : regionStates.getTableRegionStateNodes(tableName)) { + regionNode.lock(); + try { + if (regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) { + if (regionNode.isInTransition()) { + LOG.debug("skip scheduling unassign procedure for {} when closing excess region " + + "replicas since it is in transition", regionNode); + inTransitionCount++; + continue; + } + if (regionNode.isInState(State.OFFLINE, State.CLOSED, State.SPLIT)) { + continue; + } + submit.accept(regionNode.setProcedure(TransitRegionStateProcedure + .unassign(getProcedureEnvironment(), regionNode.getRegionInfo()))); + } + } finally { + regionNode.unlock(); + } + } + return inTransitionCount; + } + + public int numberOfUnclosedExcessRegionReplicas(TableName tableName, int newReplicaCount) { + int unclosed = 0; + for (RegionStateNode regionNode : regionStates.getTableRegionStateNodes(tableName)) { + regionNode.lock(); + try { + if (regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) { + if (!regionNode.isInState(State.OFFLINE, State.CLOSED, State.SPLIT)) { + unclosed++; + } + } + } finally { + regionNode.unlock(); + } + } + return unclosed; } public 
SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToSplit, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java new file mode 100644 index 000000000000..61e7c0f86075 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseExcessRegionReplicasProcedureState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseExcessRegionReplicasProcedureStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; + +/** + * Procedure for close excess region replicas. 
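The two AssignmentManager methods added above replace the old bulk createUnassignProceduresForClosingExcessRegionReplicas: the caller now passes a Consumer that receives each unassign procedure, and gets back the count of excess replicas that had to be skipped because they are still in transition. A condensed sketch of the calling pattern, essentially what the new procedure defined next does (env, tableName and newReplicaCount as in the patch):

    MutableBoolean submitted = new MutableBoolean(false);
    int inTransition = env.getAssignmentManager()
      .submitUnassignProcedureForClosingExcessRegionReplicas(tableName, newReplicaCount, p -> {
        submitted.setTrue();
        addChildProcedure(p); // p is the unassign TransitRegionStateProcedure
      });
    if (inTransition > 0 && submitted.isFalse()) {
      // nothing could be scheduled yet; suspend with backoff and retry this step later
    }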
+ */ +@InterfaceAudience.Private +public class CloseExcessRegionReplicasProcedure + extends AbstractStateMachineTableProcedure { + + private static final Logger LOG = + LoggerFactory.getLogger(CloseExcessRegionReplicasProcedure.class); + + private TableName tableName; + private int newReplicaCount; + + private RetryCounter retryCounter; + + public CloseExcessRegionReplicasProcedure() { + } + + public CloseExcessRegionReplicasProcedure(TableName tableName, int newReplicaCount) { + this.tableName = tableName; + this.newReplicaCount = newReplicaCount; + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.REGION_EDIT; + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, + CloseExcessRegionReplicasProcedureState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.trace("{} execute state={}", this, state); + switch (state) { + case CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE: + MutableBoolean submitted = new MutableBoolean(false); + int inTransitionCount = env.getAssignmentManager() + .submitUnassignProcedureForClosingExcessRegionReplicas(tableName, newReplicaCount, p -> { + submitted.setTrue(); + addChildProcedure(p); + }); + if (inTransitionCount > 0 && submitted.isFalse()) { + // we haven't scheduled any unassign procedures and there are still regions in + // transition, sleep for a while and try again + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoffMillis = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.info( + "There are still {} region(s) in transition for table {} when closing excess" + + " region replicas, suspend {}secs and try again later", + inTransitionCount, tableName, backoffMillis / 1000); + suspend((int) backoffMillis, true); + } + setNextState(CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_CONFIRM); + return Flow.HAS_MORE_STATE; + case CLOSE_EXCESS_REGION_REPLICAS_CONFIRM: + int unclosedCount = env.getAssignmentManager() + .numberOfUnclosedExcessRegionReplicas(tableName, newReplicaCount); + if (unclosedCount > 0) { + LOG.info("There are still {} unclosed region(s) for table {} when closing excess" + + " region replicas, continue..."); + setNextState( + CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE); + return Flow.HAS_MORE_STATE; + } else { + return Flow.NO_MORE_STATE; + } + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } + + @Override + protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + + @Override + protected void rollbackState(MasterProcedureEnv env, + CloseExcessRegionReplicasProcedureState state) throws IOException, InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + protected CloseExcessRegionReplicasProcedureState getState(int stateId) { + return CloseExcessRegionReplicasProcedureState.forNumber(stateId); + } + + @Override + protected int getStateId(CloseExcessRegionReplicasProcedureState state) { + return state.getNumber(); + } + + @Override + protected CloseExcessRegionReplicasProcedureState getInitialState() { + return CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE; + } + + @Override + protected void 
serializeStateData(ProcedureStateSerializer serializer) throws IOException { + CloseExcessRegionReplicasProcedureStateData data = CloseExcessRegionReplicasProcedureStateData + .newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setNewReplicaCount(newReplicaCount).build(); + serializer.serialize(data); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + CloseExcessRegionReplicasProcedureStateData data = + serializer.deserialize(CloseExcessRegionReplicasProcedureStateData.class); + tableName = ProtobufUtil.toTableName(data.getTableName()); + newReplicaCount = data.getNewReplicaCount(); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 9a52dbd079dd..45153612259b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -525,8 +525,7 @@ private void closeExcessReplicasIfNeeded(MasterProcedureEnv env) { if (newReplicaCount >= oldReplicaCount) { return; } - addChildProcedure(env.getAssignmentManager() - .createUnassignProceduresForClosingExcessRegionReplicas(getTableName(), newReplicaCount)); + addChildProcedure(new CloseExcessRegionReplicasProcedure(getTableName(), newReplicaCount)); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReduceExcessRegionReplicasBlockedByRIT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReduceExcessRegionReplicasBlockedByRIT.java new file mode 100644 index 000000000000..4cdf1ba3a633 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReduceExcessRegionReplicasBlockedByRIT.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
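The procedure above loops between its two states until every excess replica is closed: SCHEDULE submits whatever unassigns it can and, if regions are still in transition with nothing submitted, suspends itself rather than spinning; CONFIRM re-enters SCHEDULE while numberOfUnclosedExcessRegionReplicas is non-zero. The backoff it relies on is the usual procedure-v2 idiom, isolated here for clarity (fields as in the class above):

    // Suspend with exponential backoff instead of busy-waiting on regions in transition.
    if (retryCounter == null) {
      retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
    }
    long backoffMillis = retryCounter.getBackoffTimeAndIncrementAttempts();
    suspend((int) backoffMillis, true); // re-queued later via setTimeoutFailure(env)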
+ */ +package org.apache.hadoop.hbase.master.assignment; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.CompletableFuture; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.procedure.CloseExcessRegionReplicasProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * A test to make sure that we will wait for RIT to finish while closing excess region replicas. See + * HBASE-28582 and related issues for more details. + */ +@Category({ MasterTests.class, MediumTests.class }) +public class TestReduceExcessRegionReplicasBlockedByRIT { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestReduceExcessRegionReplicasBlockedByRIT.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableDescriptor TD = + TableDescriptorBuilder.newBuilder(TableName.valueOf("CloseExcessRegionReplicas")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).setRegionReplication(4).build(); + + @BeforeClass + public static void setUp() throws Exception { + UTIL.startMiniCluster(1); + UTIL.getAdmin().createTable(TD); + UTIL.waitTableAvailable(TD.getTableName()); + UTIL.waitUntilNoRegionsInTransition(); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testRIT() throws Exception { + RegionStateNode rsn = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getTableRegionStateNodes(TD.getTableName()).stream() + .filter(rn -> rn.getRegionInfo().getReplicaId() > 1).findAny().get(); + // fake a TRSP to block the CloseExcessRegionReplicasProcedure + TransitRegionStateProcedure trsp = new TransitRegionStateProcedure(); + rsn.setProcedure(trsp); + TableDescriptor newTd = TableDescriptorBuilder.newBuilder(TD).setRegionReplication(2).build(); + CompletableFuture future = UTIL.getAsyncConnection().getAdmin().modifyTable(newTd); + ProcedureExecutor procExec = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.waitFor(5000, () -> procExec.getProcedures().stream() + .anyMatch(p -> p instanceof CloseExcessRegionReplicasProcedure && !p.isFinished())); + CloseExcessRegionReplicasProcedure proc = + procExec.getProcedures().stream().filter(p -> p instanceof CloseExcessRegionReplicasProcedure) + .map(p -> (CloseExcessRegionReplicasProcedure) p).findFirst().get(); + // make sure that the procedure can not finish + for (int i = 0; i < 5; i++) { + Thread.sleep(3000); + assertFalse(proc.isFinished()); + } + assertTrue(rsn.isInState(RegionState.State.OPEN)); + // unset the procedure, so we could make progress on 
CloseExcessRegionReplicasProcedure + rsn.unsetProcedure(trsp); + UTIL.waitFor(60000, () -> proc.isFinished()); + + future.get(); + + // the region should be in CLOSED state, and should have been removed from AM + assertTrue(rsn.isInState(RegionState.State.CLOSED)); + // only 2 replicas now + assertEquals(2, UTIL.getMiniHBaseCluster().getRegions(TD.getTableName()).size()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java index ca6a745d91bf..935e33dacdc4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java @@ -157,7 +157,7 @@ public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreasedWithMultip } @Test - public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreasedWithmultipleRegions() + public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreasedWithMultipleRegions() throws Exception { enableReplicationByModification(true, 2, 3, 15); } From cd0b29c0a8fed71ec7ccb9b05a2f9b1a65d07b64 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 30 May 2024 07:27:50 +0200 Subject: [PATCH 390/514] =?UTF-8?q?HBASE-28626=20MultiRowRangeFilter=20des?= =?UTF-8?q?erialization=20fails=20in=20org.apache.h=E2=80=A6=20(#5951)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ankit Singhal --- .../hadoop/hbase/rest/model/ScannerModel.java | 86 ++++++++++++++++++- .../hbase/rest/TestScannersWithFilters.java | 15 ++++ 2 files changed, 97 insertions(+), 4 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 4c241753e5e8..9bd740c1b3b9 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -21,10 +21,12 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; +import java.util.Arrays; import java.util.Base64; import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Objects; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; @@ -203,6 +205,79 @@ public ByteArrayComparable build() { } + /** + * This DTO omits the pseudo-getters in MultiRowRangeFilter.RowRange which break Jackson + * deserialization. It also avoids adding those as dummy JSON elements. 
+ */ + static class RowRangeModel { + + protected byte[] startRow; + + protected boolean startRowInclusive = true; + + protected byte[] stopRow; + + protected boolean stopRowInclusive = false; + + public RowRangeModel() { + } + + public RowRangeModel(MultiRowRangeFilter.RowRange rr) { + this.startRow = rr.getStartRow(); + this.startRowInclusive = rr.isStartRowInclusive(); + this.stopRow = rr.getStopRow(); + this.stopRowInclusive = rr.isStopRowInclusive(); + } + + public MultiRowRangeFilter.RowRange build() { + return new MultiRowRangeFilter.RowRange(startRow, startRowInclusive, stopRow, + stopRowInclusive); + } + + public byte[] getStartRow() { + return startRow; + } + + public byte[] getStopRow() { + return stopRow; + } + + /** Returns if start row is inclusive. */ + public boolean isStartRowInclusive() { + return startRowInclusive; + } + + /** Returns if stop row is inclusive. */ + public boolean isStopRowInclusive() { + return stopRowInclusive; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(startRow); + result = prime * result + Arrays.hashCode(stopRow); + result = prime * result + Objects.hash(startRowInclusive, stopRowInclusive); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof RowRangeModel)) { + return false; + } + RowRangeModel other = (RowRangeModel) obj; + return Arrays.equals(startRow, other.startRow) + && startRowInclusive == other.startRowInclusive && Arrays.equals(stopRow, other.stopRow) + && stopRowInclusive == other.stopRowInclusive; + } + + } + // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
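RowRangeModel deliberately exposes only the four real properties of MultiRowRangeFilter.RowRange, so Jackson sees plain getters and none of the pseudo-getters that broke deserialization. A small round-trip sketch using just the constructor and build() defined above (example keys; written as if inside the same org.apache.hadoop.hbase.rest.model scope, since the class is package-private):

    MultiRowRangeFilter.RowRange range =
      new MultiRowRangeFilter.RowRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("bbb"), false);

    RowRangeModel dto = new RowRangeModel(range); // carries startRow/stopRow plus the two flags
    MultiRowRangeFilter.RowRange rebuilt = dto.build();

    assert rebuilt.isStartRowInclusive();
    assert !rebuilt.isStopRowInclusive();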
@XmlAttribute @@ -242,7 +317,7 @@ public ByteArrayComparable build() { @XmlElement public List prefixes; @XmlElement - private List ranges; + private List ranges; @XmlElement public List timestamps; @@ -333,8 +408,7 @@ public FilterModel(Filter filter) { case MultiRowRangeFilter: this.ranges = new ArrayList<>(); for (RowRange range : ((MultiRowRangeFilter) filter).getRowRanges()) { - this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), - range.getStopRow(), range.isStopRowInclusive())); + this.ranges.add(new RowRangeModel(range)); } break; case PageFilter: @@ -438,7 +512,11 @@ public Filter build() { } break; case MultiRowRangeFilter: { - filter = new MultiRowRangeFilter(ranges); + ArrayList rowRanges = new ArrayList<>(ranges.size()); + for (RowRangeModel rangeModel : ranges) { + rowRanges.add(rangeModel.build()); + } + filter = new MultiRowRangeFilter(rowRanges); } break; case PageFilter: diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 5858b90177ba..865a6a18cd0c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.filter.FilterList.Operator; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.filter.QualifierFilter; @@ -963,4 +964,18 @@ public void testFirstKeyOnlyFilter() throws Exception { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; verifyScanFull(s, kvs); } + + @Test + public void testMultiRowRangeFilter() throws Exception { + long expectedRows = 2; + long expectedKeys = colsPerRow; + List ranges = new ArrayList<>(); + // Both return only the third element, as the second one is deleted during initialization. 
+ ranges.add(new MultiRowRangeFilter.RowRange(ROWS_ONE[1], true, ROWS_ONE[2], true)); + ranges.add(new MultiRowRangeFilter.RowRange(ROWS_TWO[0], false, ROWS_TWO[3], false)); + + Scan s = new Scan(); + s.setFilter(new MultiRowRangeFilter(ranges)); + verifyScan(s, expectedRows, expectedKeys); + } } From a16f45811ec54ce3ede229579177151675781862 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 30 May 2024 19:23:36 +0200 Subject: [PATCH 391/514] HBASE-28628 Use Base64.getUrlEncoder().withoutPadding() in REST tests (#5952) Signed-off-by: Ankit Singhal --- .../java/org/apache/hadoop/hbase/rest/RowResourceBase.java | 4 ++-- .../org/apache/hadoop/hbase/rest/TestGetAndPutResource.java | 4 ++-- .../org/apache/hadoop/hbase/rest/TestMultiRowResource.java | 4 ++-- .../test/java/org/apache/hadoop/hbase/rest/TestTableScan.java | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index 6373c8515e01..004458d99e6b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -498,7 +498,7 @@ protected static Response deleteValue(String table, String row, String column) protected static Response deleteValueB64(String table, String row, String column, boolean useQueryString) throws IOException { StringBuilder path = new StringBuilder(); - Base64.Encoder encoder = Base64.getUrlEncoder(); + Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding(); path.append('/'); path.append(table); path.append('/'); @@ -543,7 +543,7 @@ protected static Response deleteRow(String table, String row) throws IOException protected static Response deleteRowB64(String table, String row, boolean useQueryString) throws IOException { StringBuilder path = new StringBuilder(); - Base64.Encoder encoder = Base64.getUrlEncoder(); + Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding(); path.append('/'); path.append(table); path.append('/'); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index d14c45e0532b..b20baea9df8c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -367,7 +367,7 @@ public void testUrlB64EncodedKeyQueryParam() throws IOException, JAXBException { setupValue1(); StringBuilder path = new StringBuilder(); - Base64.Encoder encoder = Base64.getUrlEncoder(); + Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding(); path.append('/'); path.append(TABLE); path.append('/'); @@ -387,7 +387,7 @@ public void testUrlB64EncodedKeyHeader() throws IOException, JAXBException { setupValue1(); StringBuilder path = new StringBuilder(); - Base64.Encoder encoder = Base64.getUrlEncoder(); + Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding(); path.append('/'); path.append(TABLE); path.append('/'); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index e4371dbcc2da..bfe5846e0710 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -77,7 +77,7 @@ public class TestMultiRowResource { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); - private static final Encoder base64UrlEncoder = java.util.Base64.getUrlEncoder(); + private static final Encoder base64UrlEncoder = java.util.Base64.getUrlEncoder().withoutPadding(); private static Client client; private static Configuration conf; @@ -191,7 +191,7 @@ public void testMultiCellGetJSONB64() throws IOException { client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); StringBuilder path = new StringBuilder(); - Base64.Encoder encoder = Base64.getUrlEncoder(); + Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding(); path.append("/"); path.append(TABLE); path.append("/multiget/?row="); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java index 8f0e83c07e9d..2833e9d8deed 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java @@ -95,7 +95,7 @@ public class TestTableScan { private static int expectedRows3; private static Configuration conf; - private static final Encoder base64UrlEncoder = java.util.Base64.getUrlEncoder(); + private static final Encoder base64UrlEncoder = java.util.Base64.getUrlEncoder().withoutPadding(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); From abea48464ad2e26867bf6d1f0f002148f3dda3dc Mon Sep 17 00:00:00 2001 From: Umesh <9414umeshkumar@gmail.com> Date: Fri, 31 May 2024 20:32:14 +0530 Subject: [PATCH 392/514] HBASE-28420 Update the procedure's field to store for ServerRemoteProcedure (#5816) Co-authored-by: ukumawat Signed-off-by: Duo Zhang --- .../server/master/MasterProcedure.proto | 21 +++++++++ .../procedure/ServerRemoteProcedure.java | 44 ++++++++++------- .../procedure/SnapshotVerifyProcedure.java | 47 +++++++++++++------ .../procedure/SplitWALRemoteProcedure.java | 21 +++++++-- .../SwitchRpcThrottleRemoteProcedure.java | 25 +++++++--- .../ClaimReplicationQueueRemoteProcedure.java | 20 ++++++-- .../replication/RefreshPeerProcedure.java | 24 +++++++--- ...ncReplicationReplayWALRemoteProcedure.java | 19 ++++++-- .../procedure/TestServerRemoteProcedure.java | 5 +- 9 files changed, 166 insertions(+), 60 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 48d20b6bef27..9161a02c1800 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -29,6 +29,7 @@ import "rpc/RPC.proto"; import "server/Snapshot.proto"; import "server/master/Replication.proto"; import "server/master/RegionServerStatus.proto"; +import "server/ErrorHandling.proto"; // ============================================================================ // WARNING - Compatibility rules @@ -254,6 +255,8 @@ message SnapshotVerifyProcedureStateData { required SnapshotDescription snapshot = 1; required RegionInfo region = 2; optional ServerName target_server = 3; + optional 
ServerRemoteProcedureState state = 4; + optional ForeignExceptionMessage error = 5; } message SnapshotVerifyParameter { @@ -522,6 +525,8 @@ message RefreshPeerStateData { required ServerName target_server = 3; /** We need multiple stages for sync replication state transition **/ optional uint32 stage = 4 [default = 0]; + optional ServerRemoteProcedureState state = 5; + optional ForeignExceptionMessage error = 6; } message RefreshPeerParameter { @@ -613,6 +618,8 @@ message SyncReplicationReplayWALRemoteStateData { required string peer_id = 1; repeated string wal = 2; required ServerName target_server = 3; + optional ServerRemoteProcedureState state = 4; + optional ForeignExceptionMessage error = 5; } message ReplaySyncReplicationWALParameter { @@ -650,6 +657,14 @@ enum RegionRemoteProcedureBaseState { REGION_REMOTE_PROCEDURE_SERVER_CRASH = 4; } +enum ServerRemoteProcedureState { + SERVER_REMOTE_PROCEDURE_DISPATCH = 1; + SERVER_REMOTE_PROCEDURE_DISPATCH_FAIL = 2; + SERVER_REMOTE_PROCEDURE_REPORT_SUCCEED = 3; + SERVER_REMOTE_PROCEDURE_REPORT_FAILED = 4; + SERVER_REMOTE_PROCEDURE_SERVER_CRASH = 5; +} + message RegionRemoteProcedureBaseStateData { required RegionInfo region = 1; required ServerName target_server = 2; @@ -681,6 +696,8 @@ message SwitchRpcThrottleStateData { message SwitchRpcThrottleRemoteStateData { required ServerName target_server = 1; required bool rpc_throttle_enabled = 2; + optional ServerRemoteProcedureState state = 3; + optional ForeignExceptionMessage error = 4; } message SplitWALParameter { @@ -698,6 +715,8 @@ message SplitWALRemoteData{ required string wal_path = 1; required ServerName crashed_server=2; required ServerName worker = 3; + optional ServerRemoteProcedureState state = 4; + optional ForeignExceptionMessage error = 5; } enum SplitWALState{ @@ -715,6 +734,8 @@ message ClaimReplicationQueueRemoteStateData { required string queue = 2; required ServerName target_server = 3; optional ServerName source_server = 4; + optional ServerRemoteProcedureState state = 5; + optional ForeignExceptionMessage error = 6; } message ClaimReplicationQueueRemoteParameter { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java index d24471b938e2..0c89b6396417 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java @@ -30,6 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; + @InterfaceAudience.Private /** * The base class for Procedures that run {@link java.util.concurrent.Callable}s on a (remote) @@ -63,34 +65,38 @@ *

    * If sending the operation to remote RS failed, dispatcher will call remoteCallFailed() to handle * this which calls remoteOperationDone with the exception. If the targetServer crashed but this - * procedure has no response, than dispatcher will call remoteOperationFailed() which also calls - * remoteOperationDone with the exception. If the operation is successful, then - * remoteOperationCompleted will be called and actually calls the remoteOperationDone without - * exception. In remoteOperationDone, we'll check if the procedure is already get wake up by others. - * Then developer could implement complete() based on their own purpose. But basic logic is that if - * operation succeed, set succ to true and do the clean work. If operation failed and require to - * resend it to the same server, leave the succ as false. If operation failed and require to resend - * it to another server, set succ to true and upper layer should be able to find out this operation - * not work and send a operation to another server. + * procedure has no response or if we receive failed response, then dispatcher will call + * remoteOperationFailed() which also calls remoteOperationDone with the exception. If the operation + * is successful, then remoteOperationCompleted will be called and actually calls the + * remoteOperationDone without exception. In remoteOperationDone, we'll check if the procedure is + * already get wake up by others. Then developer could implement complete() based on their own + * purpose. But basic logic is that if operation succeed, set succ to true and do the clean work. If + * operation failed and require to resend it to the same server, leave the succ as false. If + * operation failed and require to resend it to another server, set succ to true and upper layer + * should be able to find out this operation not work and send a operation to another server. 
*/ public abstract class ServerRemoteProcedure extends Procedure implements RemoteProcedureDispatcher.RemoteProcedure { protected static final Logger LOG = LoggerFactory.getLogger(ServerRemoteProcedure.class); protected ProcedureEvent event; protected ServerName targetServer; - protected boolean dispatched; - protected boolean succ; + // after remoteProcedureDone we require error field to decide the next state + protected Throwable remoteError; + protected MasterProcedureProtos.ServerRemoteProcedureState state = + MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH; - protected abstract void complete(MasterProcedureEnv env, Throwable error); + protected abstract boolean complete(MasterProcedureEnv env, Throwable error); @Override protected synchronized Procedure[] execute(MasterProcedureEnv env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { - if (dispatched) { - if (succ) { + if ( + state != MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH + ) { + if (complete(env, this.remoteError)) { return null; } - dispatched = false; + state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH; } try { env.getRemoteDispatcher().addOperationToNode(targetServer, this); @@ -99,7 +105,6 @@ protected synchronized Procedure[] execute(MasterProcedureEn + "be retried to send to another server", this.getProcId(), targetServer); return null; } - dispatched = true; event = new ProcedureEvent<>(this); event.suspendIfNotReady(this); throw new ProcedureSuspendedException(); @@ -113,17 +118,20 @@ protected synchronized void completionCleanup(MasterProcedureEnv env) { @Override public synchronized void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOException exception) { + state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH_FAIL; remoteOperationDone(env, exception); } @Override public synchronized void remoteOperationCompleted(MasterProcedureEnv env) { + state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_REPORT_SUCCEED; remoteOperationDone(env, null); } @Override public synchronized void remoteOperationFailed(MasterProcedureEnv env, RemoteProcedureException error) { + state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_REPORT_FAILED; remoteOperationDone(env, error); } @@ -137,7 +145,9 @@ synchronized void remoteOperationDone(MasterProcedureEnv env, Throwable error) { getProcId()); return; } - complete(env, error); + this.remoteError = error; + // below persistence is added so that if report goes to last active master, it throws exception + env.getMasterServices().getMasterProcedureExecutor().getStore().update(this); event.wake(env.getProcedureScheduler()); event = null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java index 651822ff5b2a..5d3b25f7b14f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java @@ -32,12 +32,15 @@ import org.apache.hadoop.hbase.procedure2.RemoteProcedureException; import org.apache.hadoop.hbase.regionserver.SnapshotVerifyCallable; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; +import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotVerifyParameter; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotVerifyProcedureStateData; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -75,7 +78,8 @@ protected boolean abort(MasterProcedureEnv env) { } @Override - protected synchronized void complete(MasterProcedureEnv env, Throwable error) { + protected synchronized boolean complete(MasterProcedureEnv env, Throwable error) { + boolean isProcedureCompleted = false; try { if (error != null) { if (error instanceof RemoteProcedureException) { @@ -83,23 +87,19 @@ protected synchronized void complete(MasterProcedureEnv env, Throwable error) { Throwable remoteEx = unwrapRemoteProcedureException((RemoteProcedureException) error); if (remoteEx instanceof CorruptedSnapshotException) { // snapshot is corrupted, will touch a flag file and finish the procedure - succ = true; + isProcedureCompleted = true; SnapshotProcedure parent = env.getMasterServices().getMasterProcedureExecutor() .getProcedure(SnapshotProcedure.class, getParentProcId()); if (parent != null) { parent.markSnapshotCorrupted(); } - } else { - // unexpected exception in remote server, will retry on other servers - succ = false; - } - } else { - // the mostly like thing is that remote call failed, will retry on other servers - succ = false; - } + } // else unexpected exception in remote server, will retry on other servers, + // procedureCompleted will stay false + } // else the mostly like thing is that remote call failed, will retry on other servers, + // procedureCompleted will stay false } else { // remote operation finished without error - succ = true; + isProcedureCompleted = true; } } catch (IOException e) { // if we can't create the flag file, then mark the current procedure as FAILED @@ -112,6 +112,7 @@ protected synchronized void complete(MasterProcedureEnv env, Throwable error) { env.getMasterServices().getSnapshotManager().releaseSnapshotVerifyWorker(this, targetServer, env.getProcedureScheduler()); } + return isProcedureCompleted; } // we will wrap remote exception into a RemoteProcedureException, @@ -126,7 +127,9 @@ protected synchronized Procedure[] execute(MasterProcedureEn try { // if we've already known the snapshot is corrupted, then stop scheduling // the new procedures and the undispatched procedures - if (!dispatched) { + if ( + state == MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH + ) { SnapshotProcedure parent = env.getMasterServices().getMasterProcedureExecutor() .getProcedure(SnapshotProcedure.class, getParentProcId()); if (parent != null && parent.isSnapshotCorrupted()) { @@ -134,14 +137,19 @@ protected synchronized Procedure[] execute(MasterProcedureEn } } // acquire a worker - if (!dispatched && targetServer == null) { + if ( + state == MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH + && targetServer == null + ) { targetServer = env.getMasterServices().getSnapshotManager().acquireSnapshotVerifyWorker(this); } 
// send remote request Procedure[] res = super.execute(env); // retry if necessary - if (!dispatched) { + if ( + state == MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_DISPATCH + ) { // the mostly like thing is that a FailedRemoteDispatchException is thrown. // we need to retry on another remote server targetServer = null; @@ -177,10 +185,15 @@ protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { SnapshotVerifyProcedureStateData.Builder builder = SnapshotVerifyProcedureStateData.newBuilder(); - builder.setSnapshot(snapshot).setRegion(ProtobufUtil.toRegionInfo(region)); + builder.setSnapshot(snapshot).setRegion(ProtobufUtil.toRegionInfo(region)).setState(state); if (targetServer != null) { builder.setTargetServer(ProtobufUtil.toServerName(targetServer)); } + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } serializer.serialize(builder.build()); } @@ -190,9 +203,13 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(SnapshotVerifyProcedureStateData.class); this.snapshot = data.getSnapshot(); this.region = ProtobufUtil.toRegionInfo(data.getRegion()); + this.state = data.getState(); if (data.hasTargetServer()) { this.targetServer = ProtobufUtil.toServerName(data.getTargetServer()); } + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java index 3dc49073c720..1e6bb78e250c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java @@ -25,12 +25,14 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; import org.apache.hadoop.hbase.regionserver.SplitWALCallable; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; /** @@ -70,7 +72,12 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO MasterProcedureProtos.SplitWALRemoteData.Builder builder = MasterProcedureProtos.SplitWALRemoteData.newBuilder(); builder.setWalPath(walPath).setWorker(ProtobufUtil.toServerName(targetServer)) - .setCrashedServer(ProtobufUtil.toServerName(crashedServer)); + .setCrashedServer(ProtobufUtil.toServerName(crashedServer)).setState(state); + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } serializer.serialize(builder.build()); } @@ -81,6 +88,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws walPath = data.getWalPath(); targetServer = 
ProtobufUtil.toServerName(data.getWorker()); crashedServer = ProtobufUtil.toServerName(data.getCrashedServer()); + state = data.getState(); + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } @Override @@ -92,21 +103,21 @@ public Optional remoteCallBuild(Maste } @Override - protected void complete(MasterProcedureEnv env, Throwable error) { + protected boolean complete(MasterProcedureEnv env, Throwable error) { if (error == null) { try { env.getMasterServices().getSplitWALManager().archive(walPath); } catch (IOException e) { LOG.warn("Failed split of {}; ignore...", walPath, e); } - succ = true; + return true; } else { if (error instanceof DoNotRetryIOException) { LOG.warn("Sent {} to wrong server {}, try another", walPath, targetServer, error); - succ = true; + return true; } else { LOG.warn("Failed split of {}, retry...", walPath, error); - succ = false; + return false; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java index 53a4af1b5104..668897cd9a4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java @@ -23,11 +23,13 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; import org.apache.hadoop.hbase.replication.regionserver.SwitchRpcThrottleRemoteCallable; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SwitchRpcThrottleRemoteStateData; /** @@ -59,9 +61,16 @@ protected boolean abort(MasterProcedureEnv env) { @Override protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { - SwitchRpcThrottleRemoteStateData.newBuilder() - .setTargetServer(ProtobufUtil.toServerName(targetServer)) - .setRpcThrottleEnabled(rpcThrottleEnabled).build(); + SwitchRpcThrottleRemoteStateData.Builder builder = + SwitchRpcThrottleRemoteStateData.newBuilder(); + builder.setTargetServer(ProtobufUtil.toServerName(targetServer)) + .setRpcThrottleEnabled(rpcThrottleEnabled).setState(state).build(); + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } + serializer.serialize(builder.build()); } @Override @@ -70,6 +79,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(SwitchRpcThrottleRemoteStateData.class); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); rpcThrottleEnabled = data.getRpcThrottleEnabled(); + state = data.getState(); + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } @Override @@ -99,13 +112,13 @@ public ServerOperationType getServerOperationType() { } @Override - protected void complete(MasterProcedureEnv env, Throwable error) { + protected boolean complete(MasterProcedureEnv env, Throwable error) { if 
(error != null) { LOG.warn("Failed to switch rpc throttle to {} on server {}", rpcThrottleEnabled, targetServer, error); - this.succ = false; + return false; } else { - this.succ = true; + return true; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java index d3aeeba541a2..e6cf46216759 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java @@ -35,11 +35,13 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueId; import org.apache.hadoop.hbase.replication.regionserver.ClaimReplicationQueueCallable; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ClaimReplicationQueueRemoteParameter; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ClaimReplicationQueueRemoteStateData; @@ -116,12 +118,12 @@ public ServerOperationType getServerOperationType() { } @Override - protected void complete(MasterProcedureEnv env, Throwable error) { + protected boolean complete(MasterProcedureEnv env, Throwable error) { if (error != null) { LOG.warn("Failed to claim replication queue {} on server {} ", queueId, targetServer, error); - this.succ = false; + return false; } else { - this.succ = true; + return true; } } @@ -144,7 +146,13 @@ protected boolean waitInitialized(MasterProcedureEnv env) { protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { ClaimReplicationQueueRemoteStateData.Builder builder = ClaimReplicationQueueRemoteStateData .newBuilder().setCrashedServer(ProtobufUtil.toServerName(queueId.getServerName())) - .setQueue(queueId.getPeerId()).setTargetServer(ProtobufUtil.toServerName(targetServer)); + .setQueue(queueId.getPeerId()).setTargetServer(ProtobufUtil.toServerName(targetServer)) + .setState(state); + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } queueId.getSourceServerName() .ifPresent(sourceServer -> builder.setSourceServer(ProtobufUtil.toServerName(sourceServer))); serializer.serialize(builder.build()); @@ -157,11 +165,15 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws targetServer = ProtobufUtil.toServerName(data.getTargetServer()); ServerName crashedServer = ProtobufUtil.toServerName(data.getCrashedServer()); String queue = data.getQueue(); + state = data.getState(); if (data.hasSourceServer()) { queueId = new ReplicationQueueId(crashedServer, queue, ProtobufUtil.toServerName(data.getSourceServer())); } else { queueId = new ReplicationQueueId(crashedServer, queue); } + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java index 0e1b9a3b3810..ef997fba4172 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java @@ -28,11 +28,13 @@ import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure; import org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerStateData; @@ -121,13 +123,13 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN } @Override - protected void complete(MasterProcedureEnv env, Throwable error) { + protected boolean complete(MasterProcedureEnv env, Throwable error) { if (error != null) { LOG.warn("Refresh peer {} for {} on {} failed", peerId, type, targetServer, error); - this.succ = false; + return false; } else { LOG.info("Refresh peer {} for {} on {} suceeded", peerId, type, targetServer); - this.succ = true; + return true; } } @@ -149,9 +151,15 @@ protected boolean waitInitialized(MasterProcedureEnv env) { @Override protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { - serializer.serialize( - RefreshPeerStateData.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type)) - .setTargetServer(ProtobufUtil.toServerName(targetServer)).setStage(stage).build()); + RefreshPeerStateData.Builder builder = RefreshPeerStateData.newBuilder(); + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } + serializer.serialize(builder.setPeerId(peerId).setType(toPeerModificationType(type)) + .setTargetServer(ProtobufUtil.toServerName(targetServer)).setStage(stage).setState(state) + .build()); } @Override @@ -161,5 +169,9 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws type = toPeerOperationType(data.getType()); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); stage = data.getStage(); + state = data.getState(); + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java index 4d3bf236716f..40a00fdd4daf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java @@ -29,11 +29,13 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; 
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; import org.apache.hadoop.hbase.replication.regionserver.ReplaySyncReplicationWALCallable; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReplaySyncReplicationWALParameter; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SyncReplicationReplayWALRemoteStateData; @@ -71,14 +73,14 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN ReplaySyncReplicationWALCallable.class, builder.build().toByteArray())); } - protected void complete(MasterProcedureEnv env, Throwable error) { + protected boolean complete(MasterProcedureEnv env, Throwable error) { if (error != null) { LOG.warn("Replay wals {} on {} failed for peer id={}", wals, targetServer, peerId, error); - this.succ = false; + return false; } else { truncateWALs(env); LOG.info("Replay wals {} on {} succeed for peer id={}", wals, targetServer, peerId); - this.succ = true; + return true; } } @@ -125,8 +127,13 @@ protected boolean abort(MasterProcedureEnv env) { protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { SyncReplicationReplayWALRemoteStateData.Builder builder = SyncReplicationReplayWALRemoteStateData.newBuilder().setPeerId(peerId) - .setTargetServer(ProtobufUtil.toServerName(targetServer)); + .setTargetServer(ProtobufUtil.toServerName(targetServer)).setState(state); wals.stream().forEach(builder::addWal); + if (this.remoteError != null) { + ErrorHandlingProtos.ForeignExceptionMessage fem = + ForeignExceptionUtil.toProtoForeignException(remoteError); + builder.setError(fem); + } serializer.serialize(builder.build()); } @@ -138,6 +145,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws wals = new ArrayList<>(); data.getWalList().forEach(wals::add); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); + state = data.getState(); + if (data.hasError()) { + this.remoteError = ForeignExceptionUtil.toException(data.getError()); + } } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java index b7c2d5099879..d3fca2f59895 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java @@ -199,9 +199,8 @@ public synchronized void remoteOperationFailed(MasterProcedureEnv env, } @Override - public void complete(MasterProcedureEnv env, Throwable error) { - this.succ = true; - return; + public boolean complete(MasterProcedureEnv env, Throwable error) { + return true; } @Override From debb1921edc79ee7d712ae19381b777d4aafdff0 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Fri, 31 May 2024 11:15:11 -0700 Subject: [PATCH 393/514] HBASE-28546 Make WAL rolling exception clear (#5848) Signed-off-by: Duo Zhang --- .../hbase/regionserver/wal/AsyncProtobufLogWriter.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index f10f39222722..bb874a001d2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.io.ByteBufferWriter; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutputHelper; @@ -205,9 +206,11 @@ private long writeWALMetadata(Consumer> action) throws I InterruptedIOException ioe = new InterruptedIOException(); ioe.initCause(e); throw ioe; - } catch (ExecutionException | TimeoutException e) { + } catch (ExecutionException e) { Throwables.propagateIfPossible(e.getCause(), IOException.class); throw new RuntimeException(e.getCause()); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); } } From fefb1867f949d44ac6aa6ade66369104d2e7e7ed Mon Sep 17 00:00:00 2001 From: wx1458451310 <1458451310@qq.com> Date: Sat, 1 Jun 2024 23:09:49 +0800 Subject: [PATCH 394/514] HBASE-28630 Doc for the config of hbase.ipc.sserver.callqueue.read.ratio is misleading (#5953) Co-authored-by: Wangx Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/performance.adoc | 2 +- src/main/asciidoc/_chapters/schema_design.adoc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc index 1daeac91dd0f..d7f18f59c1b3 100644 --- a/src/main/asciidoc/_chapters/performance.adoc +++ b/src/main/asciidoc/_chapters/performance.adoc @@ -261,7 +261,7 @@ Note that you always have at least one write queue, no matter what setting you u Given a value of `10` for `hbase.ipc.server.num.callqueue`, 3 queues would be used for reads and 7 for writes. * A value of `.5` uses the same number of read queues and write queues. Given a value of `10` for `hbase.ipc.server.num.callqueue`, 5 queues would be used for reads and 5 for writes. -* A value of `.6` uses 60% of the queues for reading and 40% for reading. +* A value of `.6` uses 60% of the queues for reading and 40% for writing. Given a value of `10` for `hbase.ipc.server.num.callqueue`, 6 queues would be used for reads and 4 for writes. * A value of `1.0` uses one queue to process write requests, and all other queues process read requests. A value higher than `1.0` has the same effect as a value of `1.0`. diff --git a/src/main/asciidoc/_chapters/schema_design.adoc b/src/main/asciidoc/_chapters/schema_design.adoc index 5663f658e3f7..e2ab0586e256 100644 --- a/src/main/asciidoc/_chapters/schema_design.adoc +++ b/src/main/asciidoc/_chapters/schema_design.adoc @@ -1120,8 +1120,8 @@ If you don't have time to build it both ways and compare, my advice would be to - A value between `0` and `1` allocates the number of queues proportionally to the number of handlers. For instance, a value of `.5` shares one queue between each two handlers. 
* Use `hbase.ipc.server.callqueue.read.ratio` (`hbase.ipc.server.callqueue.read.share` in 0.98) to split the call queues into read and write queues: - `0.5` means there will be the same number of read and write queues -- `< 0.5` for more read than write -- `> 0.5` for more write than read +- `< 0.5` for more write than read +- `> 0.5` for more read than write * Set `hbase.ipc.server.callqueue.scan.ratio` (HBase 1.0+) to split read call queues into small-read and long-read queues: - 0.5 means that there will be the same number of short-read and long-read queues - `< 0.5` for more short-read From 77ae12b8a22d21b0da0cb9e8a7d724db90d668fa Mon Sep 17 00:00:00 2001 From: guluo Date: Sat, 1 Jun 2024 23:11:48 +0800 Subject: [PATCH 395/514] HBASE-28619 Fix the inaccurate message when snapshot doesn't exist (#5948) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 5822657fd88b..634f54faa63d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2125,8 +2125,8 @@ public CompletableFuture restoreSnapshot(String snapshotName, boolean take } } if (tableName == null) { - future.completeExceptionally(new RestoreSnapshotException( - "Unable to find the table name for snapshot=" + snapshotName)); + future.completeExceptionally( + new RestoreSnapshotException("The snapshot " + snapshotName + " does not exist.")); return; } final TableName finalTableName = tableName; From 13af64dc1cbd0e90c3a98071deeb815520b624ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 21:51:03 +0800 Subject: [PATCH 396/514] HBASE-28635 Bump io.airlift:aircompressor from 0.24 to 0.27 (#5958) Bumps [io.airlift:aircompressor](https://github.com/airlift/aircompressor) from 0.24 to 0.27. - [Commits](https://github.com/airlift/aircompressor/compare/0.24...0.27) --- updated-dependencies: - dependency-name: io.airlift:aircompressor dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Pankaj Kumar Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index a34fe50d7059..7afa8b72c37c 100644 --- a/pom.xml +++ b/pom.xml @@ -919,7 +919,7 @@ 2.27.2 3.12.0 - 0.24 + 0.27 1.11.0 1.8.0 1.1.10.4 From c0fb41fea61d2ee8d64b63e793abab9acb990d35 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 4 Jun 2024 14:32:04 +0200 Subject: [PATCH 397/514] HBASE-28622 FilterListWithAND can swallow SEEK_NEXT_USING_HINT (#5955) Signed-off-by: Duo Zhang --- .../hbase/filter/ColumnPaginationFilter.java | 4 ++ .../hbase/filter/ColumnPrefixFilter.java | 2 +- .../hbase/filter/ColumnRangeFilter.java | 2 +- .../hbase/filter/FilterListWithAND.java | 64 +++++++++++++++++-- .../hadoop/hbase/filter/FuzzyRowFilter.java | 2 +- .../hadoop/hbase/filter/HintingFilter.java | 29 +++++++++ .../hbase/filter/MultiRowRangeFilter.java | 2 +- .../filter/MultipleColumnPrefixFilter.java | 2 +- .../hadoop/hbase/filter/TimestampsFilter.java | 2 +- .../hadoop/hbase/filter/TestFilterList.java | 14 ++-- 10 files changed, 106 insertions(+), 17 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/HintingFilter.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index 88f18100bbd9..bfc693c7f22a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -38,6 +38,10 @@ * can be used for row-based indexing, where references to other tables are stored across many * columns, in order to efficient lookups and paginated results for end users. Only most recent * versions are considered for pagination. + * @apiNote This filter is in awkward place, as even though it can return SEEK_NEXT_USING_HINT, it + * also maintains an internal row state, so it is not marked as HintingFilter. Hinted seek + * information may be lost when used in a MUST_PASS_ALL FilterList, which can result in + * suboptimal performance. */ @InterfaceAudience.Public public class ColumnPaginationFilter extends FilterBase { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java index 3b8df1d15c68..9b477ec06cc7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -39,7 +39,7 @@ * with columns like 'ball', 'act'. */ @InterfaceAudience.Public -public class ColumnPrefixFilter extends FilterBase { +public class ColumnPrefixFilter extends FilterBase implements HintingFilter { protected byte[] prefix = null; public ColumnPrefixFilter(final byte[] prefix) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index c9a7902d1e46..bbfec008c2c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -43,7 +43,7 @@ * maxColumnInclusive specify if the ranges are inclusive or not. 
*/ @InterfaceAudience.Public -public class ColumnRangeFilter extends FilterBase { +public class ColumnRangeFilter extends FilterBase implements HintingFilter { protected byte[] minColumn = null; protected boolean minColumnInclusive = true; protected byte[] maxColumn = null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java index 45e06f444547..a5e1eec45401 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java @@ -33,6 +33,7 @@ public class FilterListWithAND extends FilterListBase { private List seekHintFilters = new ArrayList<>(); + private boolean[] hintingFilters; public FilterListWithAND(List filters) { super(filters); @@ -40,6 +41,7 @@ public FilterListWithAND(List filters) { // sub-filters (because all sub-filters return INCLUDE*). So here, fill this array with true. we // keep this in FilterListWithAND for abstracting the transformCell() in FilterListBase. subFiltersIncludedCell = new ArrayList<>(Collections.nCopies(filters.size(), true)); + cacheHintingFilters(); } @Override @@ -49,6 +51,7 @@ public void addFilterLists(List filters) { } this.filters.addAll(filters); this.subFiltersIncludedCell.addAll(Collections.nCopies(filters.size(), true)); + this.cacheHintingFilters(); } @Override @@ -57,6 +60,20 @@ protected String formatLogFilters(List logFilters) { logFilters.toString()); } + /** + * As checks for this are in the hot path, we want them as fast as possible, so we are caching the + * status in an array. + */ + private void cacheHintingFilters() { + int filtersSize = filters.size(); + hintingFilters = new boolean[filtersSize]; + for (int i = 0; i < filtersSize; i++) { + if (filters.get(i) instanceof HintingFilter) { + hintingFilters[i] = true; + } + } + } + /** * FilterList with MUST_PASS_ALL choose the maximal forward step among sub-filters in filter list. * Let's call it: The Maximal Step Rule. So if filter-A in filter list return INCLUDE and filter-B @@ -169,10 +186,14 @@ public ReturnCode filterCell(Cell c) throws IOException { } ReturnCode rc = ReturnCode.INCLUDE; this.seekHintFilters.clear(); - for (int i = 0, n = filters.size(); i < n; i++) { + int i = 0; + int n = filters.size(); + for (; i < n; i++) { Filter filter = filters.get(i); if (filter.filterAllRemaining()) { - return ReturnCode.NEXT_ROW; + rc = ReturnCode.NEXT_ROW; + // See comment right after this loop + break; } ReturnCode localRC; localRC = filter.filterCell(c); @@ -184,9 +205,26 @@ public ReturnCode filterCell(Cell c) throws IOException { // otherwise we may mess up the global state (such as offset, count..) in the following // sub-filters. (HBASE-20565) if (!isIncludeRelatedReturnCode(rc)) { - return rc; + // See comment right after this loop + break; + } + } + // We have the preliminary return code. However, if there are remaining uncalled hintingFilters, + // they may return hints that allow us to seek ahead and skip reading and processing a lot of + // cells. + // Process the remaining hinting filters so that we can get all seek hints. 
+ // The farthest key is computed in getNextCellHint() + if (++i < n) { + for (; i < n; i++) { + if (hintingFilters[i]) { + Filter filter = filters.get(i); + if (filter.filterCell(c) == ReturnCode.SEEK_NEXT_USING_HINT) { + seekHintFilters.add(filter); + } + } } } + if (!seekHintFilters.isEmpty()) { return ReturnCode.SEEK_NEXT_USING_HINT; } @@ -206,17 +244,29 @@ public boolean filterRowKey(Cell firstRowCell) throws IOException { if (isEmpty()) { return super.filterRowKey(firstRowCell); } - boolean retVal = false; + boolean anyRowKeyFiltered = false; + boolean anyHintingPassed = false; for (int i = 0, n = filters.size(); i < n; i++) { Filter filter = filters.get(i); - if (filter.filterAllRemaining() || filter.filterRowKey(firstRowCell)) { + if (filter.filterAllRemaining()) { + // We don't need to care about any later filters, as we end the scan immediately. + // TODO HBASE-28633 in the normal code path, filterAllRemaining() always gets checked + // before filterRowKey(). We should be able to remove this check. + return true; + } else if (filter.filterRowKey(firstRowCell)) { // Can't just return true here, because there are some filters (such as PrefixFilter) which // will catch the row changed event by filterRowKey(). If we return early here, those // filters will have no chance to update their row state. - retVal = true; + anyRowKeyFiltered = true; + } else if (hintingFilters[i]) { + // If filterRowKey returns false and this is a hinting filter, then we must not filter this + // rowkey. + // Otherwise this sub-filter doesn't get a chance to provide a seek hint, and the scan may + // regress into a full scan. + anyHintingPassed = true; } } - return retVal; + return anyRowKeyFiltered && !anyHintingPassed; } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index fd5a81d694e3..244dc9c7ca5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -65,7 +65,7 @@ * I.e. fuzzy info tells the matching mask is "????_99_????_01", where at ? can be any value. */ @InterfaceAudience.Public -public class FuzzyRowFilter extends FilterBase { +public class FuzzyRowFilter extends FilterBase implements HintingFilter { private static final boolean UNSAFE_UNALIGNED = HBasePlatformDependent.unaligned(); private List> fuzzyKeysData; // Used to record whether we want to skip the current row. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/HintingFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/HintingFilter.java new file mode 100644 index 000000000000..ed68bf82d20f --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/HintingFilter.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Marker interface for filters that may return SEEK_NEXT_USING_HINT. This marker interface + * indicates that when it's used in a MUST_PASS_ALL FilterList then filterCell() must always be + * called if filterRowKey() returned false. + */ +@InterfaceAudience.Public +public interface HintingFilter { +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index 85fae7b0a992..032a14eb62be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -50,7 +50,7 @@ * fast-forwarding during scan. Thus, the scan will be quite efficient. */ @InterfaceAudience.Public -public class MultiRowRangeFilter extends FilterBase { +public class MultiRowRangeFilter extends FilterBase implements HintingFilter { private static final int ROW_BEFORE_FIRST_RANGE = -1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index 168257cd2f71..00d84fee9e38 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -42,7 +42,7 @@ * with columns like 'ball', 'act'. */ @InterfaceAudience.Public -public class MultipleColumnPrefixFilter extends FilterBase { +public class MultipleColumnPrefixFilter extends FilterBase implements HintingFilter { private static final Logger LOG = LoggerFactory.getLogger(MultipleColumnPrefixFilter.class); protected byte[] hint = null; protected TreeSet sortedPrefixes = createTreeSet(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index 235691ef7cb1..cf2aa688d5a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -43,7 +43,7 @@ * {@link org.apache.hadoop.hbase.client.Scan#setTimestamp(long)}. 
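+ * <p>
+ * Illustrative usage sketch (not part of this change; it assumes the existing two-argument
+ * constructor): to return only cells written at timestamps 5 and 10 while allowing the filter to
+ * seek ahead with hints, construct it with canHint = true:
+ * <pre>
+ * Filter f = new TimestampsFilter(Arrays.asList(5L, 10L), true);
+ * </pre>
+ * Only a filter created with canHint = true returns SEEK_NEXT_USING_HINT, which is what makes the
+ * HintingFilter marker relevant here.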
*/ @InterfaceAudience.Public -public class TimestampsFilter extends FilterBase { +public class TimestampsFilter extends FilterBase implements HintingFilter { private final boolean canHint; TreeSet timestamps; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index 6c278b362c85..f64381a8a22e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -644,6 +644,12 @@ public int hashCode() { } } + private static class HintingMockFilter extends MockFilter implements HintingFilter { + public HintingMockFilter(ReturnCode targetRetCode) { + super(targetRetCode); + } + } + @Test public void testShouldPassCurrentCellToFilter() throws IOException { KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("a"), 1, @@ -729,7 +735,7 @@ public void testTheMaximalRule() throws IOException { MockFilter filter3 = new MockFilter(ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW); MockFilter filter4 = new MockFilter(ReturnCode.NEXT_COL); MockFilter filter5 = new MockFilter(ReturnCode.SKIP); - MockFilter filter6 = new MockFilter(ReturnCode.SEEK_NEXT_USING_HINT); + MockFilter filter6 = new HintingMockFilter(ReturnCode.SEEK_NEXT_USING_HINT); MockFilter filter7 = new MockFilter(ReturnCode.NEXT_ROW); FilterList filterList = new FilterList(Operator.MUST_PASS_ALL, filter1, filter2); @@ -739,10 +745,10 @@ public void testTheMaximalRule() throws IOException { assertEquals(ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, filterList.filterCell(kv1)); filterList = new FilterList(Operator.MUST_PASS_ALL, filter4, filter5, filter6); - assertEquals(ReturnCode.NEXT_COL, filterList.filterCell(kv1)); + assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, filterList.filterCell(kv1)); filterList = new FilterList(Operator.MUST_PASS_ALL, filter4, filter6); - assertEquals(ReturnCode.NEXT_COL, filterList.filterCell(kv1)); + assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, filterList.filterCell(kv1)); filterList = new FilterList(Operator.MUST_PASS_ALL, filter3, filter1); assertEquals(ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW, filterList.filterCell(kv1)); @@ -767,7 +773,7 @@ public void testTheMinimalRule() throws IOException { MockFilter filter3 = new MockFilter(ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW); MockFilter filter4 = new MockFilter(ReturnCode.NEXT_COL); MockFilter filter5 = new MockFilter(ReturnCode.SKIP); - MockFilter filter6 = new MockFilter(ReturnCode.SEEK_NEXT_USING_HINT); + MockFilter filter6 = new HintingMockFilter(ReturnCode.SEEK_NEXT_USING_HINT); FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, filter1, filter2); assertEquals(ReturnCode.INCLUDE, filterList.filterCell(kv1)); From 8afd93e78b98d13760c1d7149839fb2acf6b7f97 Mon Sep 17 00:00:00 2001 From: guluo Date: Wed, 5 Jun 2024 23:18:51 +0800 Subject: [PATCH 398/514] HBASE-28614 Introduce a field to display whether the snapshot is expired (#5947) Signed-off-by: Pankaj Kumar Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang --- .../hbase-webapps/master/snapshot.jsp | 5 +++++ .../hbase-webapps/master/snapshotsStats.jsp | 5 +++++ .../hbase-webapps/master/userSnapshots.jsp | 5 +++++ .../ruby/shell/commands/list_snapshots.rb | 19 +++++++++++++++++-- 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index 9b1328a3a32b..e85cab95d7e5 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -25,6 +25,7 @@ import="org.apache.hadoop.hbase.http.InfoServer" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.snapshot.SnapshotInfo" + import="org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils" import="org.apache.hadoop.util.StringUtils" import="org.apache.hadoop.hbase.TableName" %> @@ -98,6 +99,7 @@ Type Format Version State + Expired @@ -124,6 +126,9 @@ <% } else { %> ok <% } %> + + <%= SnapshotDescriptionUtils.isExpiredSnapshot(snapshotTtl, snapshot.getCreationTime(), System.currentTimeMillis()) ? "Yes" : "No" %> +

    diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp index becdc68442db..6202d7409b5c 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp @@ -26,6 +26,7 @@ import="org.apache.hadoop.fs.Path" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.snapshot.SnapshotInfo" + import="org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.util.StringUtils" import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription" @@ -68,6 +69,7 @@ Creation Time Owner TTL + Expired Shared Storefile Size Mob Storefile Size Archived Storefile Size @@ -94,6 +96,9 @@ .format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL)%> <% } %> + + <%= SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), snapshotDesc.getCreationTime(), System.currentTimeMillis()) ? "Yes" : "No" %> + <%= StringUtils.humanReadableInt(stats.getSharedStoreFilesSize()) %> <%= StringUtils.humanReadableInt(stats.getMobStoreFilesSize()) %> <%= StringUtils.humanReadableInt(stats.getArchivedStoreFileSize()) %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp index 0b741e1089fd..97cd477f6d8d 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp @@ -20,6 +20,7 @@ <%@ page contentType="text/plain;charset=UTF-8" import="java.util.List" import="java.util.Date" + import="org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils" import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.TableName" @@ -38,6 +39,7 @@ Creation Time Owner TTL + Expired <% for (SnapshotDescription snapshotDesc : snapshots){ %> <% TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); %> @@ -51,6 +53,9 @@ <%= snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> + + <%= SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), snapshotDesc.getCreationTime(), System.currentTimeMillis()) ? "Yes" : "No" %> + <% } %>

    <%= snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]
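The "Expired" column added to the JSP pages above and the TTL annotation added to the shell command below both delegate to SnapshotDescriptionUtils.isExpiredSnapshot(ttl, creationTime, now). As a rough, illustrative sketch of what that check amounts to (the utility itself is not shown in this patch and may handle further edge cases such as overflow), with the TTL given in seconds and the timestamps in milliseconds:

    // ttlSeconds == 0 means the snapshot never expires ("FOREVER" in the UI and shell output).
    static boolean isExpired(long ttlSeconds, long creationTimeMillis, long nowMillis) {
      return ttlSeconds > 0 && (creationTimeMillis + ttlSeconds * 1000L) < nowMillis;
    }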

    diff --git a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb index 6a173993654a..3b34cb993e60 100644 --- a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb +++ b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb @@ -18,6 +18,8 @@ require 'time' +java_import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils + module Shell module Commands class ListSnapshots < Command @@ -34,12 +36,25 @@ def help end def command(regex = '.*') - formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME']) + formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME + TTL(Sec)']) list = admin.list_snapshot(regex) list.each do |snapshot| creation_time = Time.at(snapshot.getCreationTime / 1000).to_s - formatter.row([snapshot.getName, snapshot.getTableNameAsString + ' (' + creation_time + ')']) + ttl = snapshot.getTtl + if ttl == 0 + ttl_info = 'FOREVER' + else + now_timestamp = (Time.now.to_f * 1000).to_i + expired = SnapshotDescriptionUtils.isExpiredSnapshot(ttl, snapshot.getCreationTime(), now_timestamp) + if expired + ttl_info = ttl.to_s + ' (Expired) ' + else + ttl_info = ttl.to_s + end + end + info = snapshot.getTableNameAsString + ' (' + creation_time + ') ' + ttl_info + formatter.row([snapshot.getName, info]) end formatter.footer(list.size) From 070bca186eea2511b1ad09830fb16d37708a3954 Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Thu, 6 Jun 2024 11:08:11 +0800 Subject: [PATCH 399/514] HBASE-28625 ExportSnapshot should verify checksums for the source file and the target file (#5950) --- .../hadoop/hbase/snapshot/ExportSnapshot.java | 79 +++++++++++++------ 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index c6f655c37306..fd69960b78da 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -309,24 +309,38 @@ private void copyFile(final Context context, final SnapshotFileInfo inputInfo, in = new ThrottledInputStream(new BufferedInputStream(in), bandwidthMB * 1024 * 1024L); } + Path inputPath = inputStat.getPath(); try { context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen()); // Ensure that the output folder is there and copy the file createOutputPath(outputPath.getParent()); FSDataOutputStream out = outputFs.create(outputPath, true); - try { - copyData(context, inputStat.getPath(), in, outputPath, out, inputStat.getLen()); - } finally { - out.close(); - } + + long stime = EnvironmentEdgeManager.currentTime(); + long totalBytesWritten = + copyData(context, inputPath, in, outputPath, out, inputStat.getLen()); + + // Verify the file length and checksum + verifyCopyResult(inputStat, outputFs.getFileStatus(outputPath)); + + long etime = EnvironmentEdgeManager.currentTime(); + LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); + LOG + .info("size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) + + ")" + " time=" + StringUtils.formatTimeDiff(etime, stime) + String + .format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); + context.getCounter(Counter.FILES_COPIED).increment(1); // Try to Preserve attributes if (!preserveAttributes(outputPath, inputStat)) { LOG.warn("You may have to run manually chown on: " + 
outputPath); } + } catch (IOException e) { + LOG.error("Error copying " + inputPath + " to " + outputPath, e); + context.getCounter(Counter.COPY_FAILED).increment(1); + throw e; } finally { - in.close(); injectTestFailure(context, inputInfo); } } @@ -403,7 +417,7 @@ private boolean stringIsNotEmpty(final String str) { return str != null && str.length() > 0; } - private void copyData(final Context context, final Path inputPath, final InputStream in, + private long copyData(final Context context, final Path inputPath, final InputStream in, final Path outputPath, final FSDataOutputStream out, final long inputFileSize) throws IOException { final String statusMessage = @@ -415,7 +429,6 @@ private void copyData(final Context context, final Path inputPath, final InputSt int reportBytes = 0; int bytesRead; - long stime = EnvironmentEdgeManager.currentTime(); while ((bytesRead = in.read(buffer)) > 0) { out.write(buffer, 0, bytesRead); totalBytesWritten += bytesRead; @@ -430,7 +443,6 @@ private void copyData(final Context context, final Path inputPath, final InputSt reportBytes = 0; } } - long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); context @@ -438,23 +450,10 @@ private void copyData(final Context context, final Path inputPath, final InputSt (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + " to " + outputPath); - // Verify that the written size match - if (totalBytesWritten != inputFileSize) { - String msg = "number of bytes copied not matching copied=" + totalBytesWritten - + " expected=" + inputFileSize + " for file=" + inputPath; - throw new IOException(msg); - } - - LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); - LOG - .info("size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) - + ")" + " time=" + StringUtils.formatTimeDiff(etime, stime) + String - .format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); - context.getCounter(Counter.FILES_COPIED).increment(1); - } catch (IOException e) { - LOG.error("Error copying " + inputPath + " to " + outputPath, e); - context.getCounter(Counter.COPY_FAILED).increment(1); - throw e; + return totalBytesWritten; + } finally { + out.close(); + in.close(); } } @@ -534,6 +533,34 @@ private FileChecksum getFileChecksum(final FileSystem fs, final Path path) { } } + private void verifyCopyResult(final FileStatus inputStat, final FileStatus outputStat) + throws IOException { + long inputLen = inputStat.getLen(); + long outputLen = outputStat.getLen(); + Path inputPath = inputStat.getPath(); + Path outputPath = outputStat.getPath(); + + if (inputLen != outputLen) { + throw new IOException("Mismatch in length of input:" + inputPath + " (" + inputLen + + ") and output:" + outputPath + " (" + outputLen + ")"); + } + + // If length==0, we will skip checksum + if (inputLen != 0 && verifyChecksum) { + FileChecksum inChecksum = getFileChecksum(inputFs, inputPath); + if (inChecksum == null) { + LOG.warn("Input file " + inputPath + " checksums are not available"); + } + FileChecksum outChecksum = getFileChecksum(outputFs, outputPath); + if (outChecksum == null) { + LOG.warn("Output file " + outputPath + " checksums are not available"); + } + if (inChecksum != null && outChecksum != null && !inChecksum.equals(outChecksum)) { + throw new IOException("Checksum mismatch between " + inputPath + " and " + outputPath); + } + } + } + /** * Check if the two files are equal by looking at the file 
length, and at the checksum (if user * has specified the verifyChecksum flag). From 04816d98a201efc6c6d50eed4a3b6d7154caf5e7 Mon Sep 17 00:00:00 2001 From: DieterDP <90392398+DieterDP-ng@users.noreply.github.com> Date: Thu, 6 Jun 2024 13:48:46 +0200 Subject: [PATCH 400/514] HBASE-28562 Correct backup ancestor calculation (#5868) The ancestor calculation was wrong for incremental backups: when requesting the ancestors for an incremental backup X, the ancestors could include both full and incremental backups that predate the full backup on which X is built. This caused a crash in incremental backup creation when data of old incremental backups was deleted through other means than the HBase API (i.e. without the HBase backup system table being updated). Reviewed-by: Ray Mattingly Signed-off-by: Nick Dimiduk --- .../hbase/backup/impl/BackupManager.java | 99 ------------------- .../hbase/backup/impl/BackupManifest.java | 98 ------------------ .../backup/impl/FullTableBackupClient.java | 2 +- .../impl/IncrementalTableBackupClient.java | 2 +- .../hbase/backup/impl/TableBackupClient.java | 68 +++++++++++-- .../hadoop/hbase/backup/TestBackupBase.java | 4 +- .../TestIncrementalBackupWithDataLoss.java | 85 ++++++++++++++++ 7 files changed, 151 insertions(+), 207 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index ed1755ad5021..f0c93db4b4c2 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -25,7 +25,6 @@ import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupHFileCleaner; @@ -34,8 +33,6 @@ import org.apache.hadoop.hbase.backup.BackupObserver; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; @@ -268,102 +265,6 @@ public void setBackupInfo(BackupInfo backupInfo) { this.backupInfo = backupInfo; } - /** - * Get direct ancestors of the current backup. 
- * @param backupInfo The backup info for the current backup - * @return The ancestors for the current backup - * @throws IOException exception - */ - public ArrayList getAncestors(BackupInfo backupInfo) throws IOException { - LOG.debug("Getting the direct ancestors of the current backup {}", backupInfo.getBackupId()); - - ArrayList ancestors = new ArrayList<>(); - - // full backup does not have ancestor - if (backupInfo.getType() == BackupType.FULL) { - LOG.debug("Current backup is a full backup, no direct ancestor for it."); - return ancestors; - } - - // get all backup history list in descending order - ArrayList allHistoryList = getBackupHistory(true); - for (BackupInfo backup : allHistoryList) { - - BackupImage.Builder builder = BackupImage.newBuilder(); - - BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); - - // Only direct ancestors for a backup are required and not entire history of backup for this - // table resulting in verifying all of the previous backups which is unnecessary and backup - // paths need not be valid beyond the lifetime of a backup. - // - // RootDir is way of grouping a single backup including one full and many incremental backups - if (!image.getRootDir().equals(backupInfo.getBackupRootDir())) { - continue; - } - - // add the full backup image as an ancestor until the last incremental backup - if (backup.getType().equals(BackupType.FULL)) { - // check the backup image coverage, if previous image could be covered by the newer ones, - // then no need to add - if (!BackupManifest.canCoverImage(ancestors, image)) { - ancestors.add(image); - } - } else { - // found last incremental backup, if previously added full backup ancestor images can cover - // it, then this incremental ancestor is not the dependent of the current incremental - // backup, that is to say, this is the backup scope boundary of current table set. - // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing - // incremental backup - if (BackupManifest.canCoverImage(ancestors, image)) { - LOG.debug("Met the backup boundary of the current table set:"); - for (BackupImage image1 : ancestors) { - LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); - } - } else { - Path logBackupPath = - HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); - LOG.debug( - "Current backup has an incremental backup ancestor, " - + "touching its image manifest in {}" + " to construct the dependency.", - logBackupPath.toString()); - BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); - BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); - ancestors.add(lastIncrImage); - - LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}", - lastIncrImage.getBackupId(), lastIncrImage.getRootDir()); - } - } - } - LOG.debug("Got {} ancestors for the current backup.", ancestors.size()); - return ancestors; - } - - /** - * Get the direct ancestors of this backup for one table involved. 
- * @param backupInfo backup info - * @param table table - * @return backupImages on the dependency list - * @throws IOException exception - */ - public ArrayList getAncestors(BackupInfo backupInfo, TableName table) - throws IOException { - ArrayList ancestors = getAncestors(backupInfo); - ArrayList tableAncestors = new ArrayList<>(); - for (BackupImage image : ancestors) { - if (image.hasTable(table)) { - tableAncestors.add(image); - if (image.getType() == BackupType.FULL) { - break; - } - } - } - return tableAncestors; - } - /* * backup system table operations */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 237d8686ab79..d66b5886794c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -548,104 +548,6 @@ public ArrayList getDependentListByTable(TableName table) { return tableImageList; } - /** - * Get the full dependent image list in the whole dependency scope for a specific table of this - * backup in time order from old to new. - * @param table table - * @return the full backup image list for a table in time order in the whole scope of the - * dependency of this image - */ - public ArrayList getAllDependentListByTable(TableName table) { - ArrayList tableImageList = new ArrayList<>(); - ArrayList imageList = getRestoreDependentList(false); - for (BackupImage image : imageList) { - if (image.hasTable(table)) { - tableImageList.add(image); - } - } - return tableImageList; - } - - /** - * Check whether backup image1 could cover backup image2 or not. - * @param image1 backup image 1 - * @param image2 backup image 2 - * @return true if image1 can cover image2, otherwise false - */ - public static boolean canCoverImage(BackupImage image1, BackupImage image2) { - // image1 can cover image2 only when the following conditions are satisfied: - // - image1 must not be an incremental image; - // - image1 must be taken after image2 has been taken; - // - table set of image1 must cover the table set of image2. - if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image2.getStartTs()) { - return false; - } - List image1TableList = image1.getTableNames(); - List image2TableList = image2.getTableNames(); - boolean found; - for (int i = 0; i < image2TableList.size(); i++) { - found = false; - for (int j = 0; j < image1TableList.size(); j++) { - if (image2TableList.get(i).equals(image1TableList.get(j))) { - found = true; - break; - } - } - if (!found) { - return false; - } - } - - LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); - return true; - } - - /** - * Check whether backup image set could cover a backup image or not. - * @param fullImages The backup image set - * @param image The target backup image - * @return true if fullImages can cover image, otherwise false - */ - public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { - // fullImages can cover image only when the following conditions are satisfied: - // - each image of fullImages must not be an incremental image; - // - each image of fullImages must be taken after image has been taken; - // - sum table set of fullImages must cover the table set of image. 
- for (BackupImage image1 : fullImages) { - if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image.getStartTs()) { - return false; - } - } - - ArrayList image1TableList = new ArrayList<>(); - for (BackupImage image1 : fullImages) { - List tableList = image1.getTableNames(); - for (TableName table : tableList) { - image1TableList.add(table.getNameAsString()); - } - } - ArrayList image2TableList = new ArrayList<>(); - List tableList = image.getTableNames(); - for (TableName table : tableList) { - image2TableList.add(table.getNameAsString()); - } - - for (int i = 0; i < image2TableList.size(); i++) { - if (image1TableList.contains(image2TableList.get(i)) == false) { - return false; - } - } - - LOG.debug("Full image set can cover image " + image.getBackupId()); - return true; - } - public BackupInfo toBackupInfo() { BackupInfo info = new BackupInfo(); info.setType(backupImage.getType()); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index fee2e825728e..06dad8880b5b 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -190,7 +190,7 @@ public void execute() throws IOException { backupManager.writeBackupStartCode(newStartCode); // backup complete - completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf); + completeBackup(conn, backupInfo, BackupType.FULL, conf); } catch (Exception e) { failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ", BackupType.FULL, conf); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index 211e9f96c89c..b7d1c4a95cc6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -310,7 +310,7 @@ public void execute() throws IOException { handleBulkLoad(backupInfo.getTableNames()); // backup complete - completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf); + completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf); } catch (IOException e) { failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index e758ced3f846..0aa6516fe4f3 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -19,8 +19,12 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -271,8 +275,8 @@ public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo bac * @param backupInfo The current backup info * @throws IOException exception */ - protected void 
addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type, - Configuration conf) throws IOException { + protected void addManifest(BackupInfo backupInfo, BackupType type, Configuration conf) + throws IOException { // set the overall backup phase : store manifest backupInfo.setPhase(BackupPhase.STORE_MANIFEST); @@ -281,13 +285,65 @@ protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, B // set the table region server start and end timestamps for incremental backup manifest.setIncrTimestampMap(backupInfo.getIncrTimestampMap()); } - ArrayList ancestors = backupManager.getAncestors(backupInfo); + List ancestors = getAncestors(backupInfo); for (BackupImage image : ancestors) { manifest.addDependentImage(image); } manifest.store(conf); } + /** + * Gets the direct ancestors of the currently being created backup. + * @param backupInfo The backup info for the backup being created + */ + protected List getAncestors(BackupInfo backupInfo) throws IOException { + LOG.debug("Getting the direct ancestors of the current backup {}", backupInfo.getBackupId()); + + // Full backups do not have ancestors + if (backupInfo.getType() == BackupType.FULL) { + LOG.debug("Current backup is a full backup, no direct ancestor for it."); + return Collections.emptyList(); + } + + List ancestors = new ArrayList<>(); + Set tablesToCover = new HashSet<>(backupInfo.getTables()); + + // Go over the backup history list from newest to oldest + List allHistoryList = backupManager.getBackupHistory(true); + for (BackupInfo backup : allHistoryList) { + // If the image has a different rootDir, it cannot be an ancestor. + if (!Objects.equals(backup.getBackupRootDir(), backupInfo.getBackupRootDir())) { + continue; + } + + BackupImage.Builder builder = BackupImage.newBuilder(); + BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) + .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + + // The ancestors consist of the most recent FULL backups that cover the list of tables + // required in the new backup and all INCREMENTAL backups that came after one of those FULL + // backups. + if (backup.getType().equals(BackupType.INCREMENTAL)) { + ancestors.add(image); + LOG.debug("Dependent incremental backup image: {BackupID={}}", image.getBackupId()); + } else { + if (tablesToCover.removeAll(new HashSet<>(image.getTableNames()))) { + ancestors.add(image); + LOG.debug("Dependent full backup image: {BackupID={}}", image.getBackupId()); + + if (tablesToCover.isEmpty()) { + LOG.debug("Got {} ancestors for the current backup.", ancestors.size()); + return Collections.unmodifiableList(ancestors); + } + } + } + } + + throw new IllegalStateException( + "Unable to find full backup that contains tables: " + tablesToCover); + } + /** * Get backup request meta data dir as string. 
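// A minimal sketch, for illustration, of the ancestor rule that the new getAncestors()
// above implements: walk the backup history newest-to-oldest, keep every incremental
// backup under the same backup root, keep a full backup only while it still covers tables
// not yet covered, and stop once every table of the new backup is covered. The Backup
// record below is a simplified stand-in, not the real BackupInfo/BackupImage API.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class AncestorRuleSketch {
  record Backup(String id, boolean full, String rootDir, Set<String> tables) {}

  static List<Backup> ancestors(Backup current, List<Backup> historyNewestFirst) {
    List<Backup> ancestors = new ArrayList<>();
    Set<String> tablesToCover = new HashSet<>(current.tables());
    for (Backup b : historyNewestFirst) {
      if (!b.rootDir().equals(current.rootDir())) {
        continue; // a different backup root can never contribute an ancestor
      }
      if (!b.full()) {
        ancestors.add(b); // incrementals newer than the covering fulls are dependencies
      } else if (tablesToCover.removeAll(b.tables())) {
        ancestors.add(b); // this full backup covers at least one still-uncovered table
        if (tablesToCover.isEmpty()) {
          return ancestors; // fully covered: anything older is irrelevant
        }
      }
    }
    throw new IllegalStateException("No full backup covers tables: " + tablesToCover);
  }
}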
* @param backupInfo backup info @@ -312,15 +368,15 @@ protected String obtainBackupMetaDataStr(BackupInfo backupInfo) { * @param backupInfo backup info * @throws IOException exception */ - protected void completeBackup(final Connection conn, BackupInfo backupInfo, - BackupManager backupManager, BackupType type, Configuration conf) throws IOException { + protected void completeBackup(final Connection conn, BackupInfo backupInfo, BackupType type, + Configuration conf) throws IOException { // set the complete timestamp of the overall backup backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime()); // set overall backup status: complete backupInfo.setState(BackupState.COMPLETE); backupInfo.setProgress(100); // add and store the manifest for the backup - addManifest(backupInfo, backupManager, type, conf); + addManifest(backupInfo, type, conf); // compose the backup complete data String backupCompleteData = diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 7b5095a897e2..e9c1cfd9c323 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -160,7 +160,7 @@ public void execute() throws IOException { failStageIf(Stage.stage_4); // backup complete - completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf); + completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf); } catch (Exception e) { failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", @@ -244,7 +244,7 @@ public void execute() throws IOException { backupManager.writeBackupStartCode(newStartCode); failStageIf(Stage.stage_4); // backup complete - completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf); + completeBackup(conn, backupInfo, BackupType.FULL, conf); } catch (Exception e) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java new file mode 100644 index 000000000000..cf442f5f0dd7 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestIncrementalBackupWithDataLoss extends TestBackupBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestIncrementalBackupWithDataLoss.class); + + private static final Logger LOG = + LoggerFactory.getLogger(TestIncrementalBackupWithDataLoss.class); + + @Test + public void testFullBackupBreaksDependencyOnOlderBackups() throws Exception { + LOG.info("test creation of backups after backup data was lost"); + + try (Connection conn = ConnectionFactory.createConnection(conf1)) { + BackupAdminImpl client = new BackupAdminImpl(conn); + List tables = Lists.newArrayList(table1); + + insertIntoTable(conn, table1, famName, 1, 1).close(); + String backup1 = + client.backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR)); + insertIntoTable(conn, table1, famName, 2, 1).close(); + String backup2 = + client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR)); + + assertTrue(checkSucceeded(backup1)); + assertTrue(checkSucceeded(backup2)); + + // Simulate data loss on the backup storage + TEST_UTIL.getTestFileSystem().delete(new Path(BACKUP_ROOT_DIR, backup2), true); + + insertIntoTable(conn, table1, famName, 4, 1).close(); + String backup4 = + client.backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR)); + insertIntoTable(conn, table1, famName, 5, 1).close(); + String backup5 = + client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR)); + insertIntoTable(conn, table1, famName, 6, 1).close(); + String backup6 = + client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR)); + + assertTrue(checkSucceeded(backup4)); + assertTrue(checkSucceeded(backup5)); + assertTrue(checkSucceeded(backup6)); + } + } + +} From c865570d7724a0de90b209aba6c378e3f378321d Mon Sep 17 00:00:00 2001 From: Subrat Mishra Date: Thu, 6 Jun 2024 18:01:37 +0530 Subject: [PATCH 401/514] HBASE-28618: Fixed hadolint check in nightly build. (#5957) Signed-off-by: Wellington Chevreuil Reviewed-by: Duo Zhang --- dev-support/hbase_docker/m1/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/hbase_docker/m1/Dockerfile b/dev-support/hbase_docker/m1/Dockerfile index 5399fa0e5af6..fa88638a7aef 100644 --- a/dev-support/hbase_docker/m1/Dockerfile +++ b/dev-support/hbase_docker/m1/Dockerfile @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image +FROM amd64/ubuntu:22.04 AS base_image SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \ From 00b1bba5065d816523972baec56088db4ee20b85 Mon Sep 17 00:00:00 2001 From: bcolyn-ngdata <94831216+bcolyn-ngdata@users.noreply.github.com> Date: Thu, 6 Jun 2024 15:38:14 +0200 Subject: [PATCH 402/514] HBASE-28539 Merge of incremental backups fails if backups are on a separate FileSystem (#5867) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the backups are stored on a location that is not the DistributedFilesystem underpinning HBase itself merging of incremental backups fails. Detected with backups stored on S3A, but can be reproduced with any other (like LocalFilesystem). Reviewed-by: Ray Mattingly Signed-off-by: Nick Dimiduk Signed-off-by: Andor Molnár --- .../mapreduce/MapReduceBackupMergeJob.java | 12 +++-- .../hadoop/hbase/backup/TestBackupMerge.java | 44 +++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java index 3b4cf0246d73..1f9ae16c1dfa 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java @@ -96,7 +96,7 @@ public void run(String[] backupIds) throws IOException { boolean finishedTables = false; Connection conn = ConnectionFactory.createConnection(getConf()); BackupSystemTable table = new BackupSystemTable(conn); - FileSystem fs = FileSystem.get(getConf()); + FileSystem fs = null; try { @@ -112,6 +112,8 @@ public void run(String[] backupIds) throws IOException { BackupInfo bInfo = table.readBackupInfo(backupIds[0]); String backupRoot = bInfo.getBackupRootDir(); + Path backupRootPath = new Path(backupRoot); + fs = backupRootPath.getFileSystem(conf); for (int i = 0; i < tableNames.length; i++) { LOG.info("Merge backup images for " + tableNames[i]); @@ -120,7 +122,9 @@ public void run(String[] backupIds) throws IOException { Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); String dirs = StringUtils.join(dirPaths, ","); - Path bulkOutputPath = BackupUtils.getBulkOutputDir( + // bulkOutputPath should be on the same filesystem as backupRoot + Path tmpRestoreOutputDir = HBackupFileSystem.getBackupTmpDirPath(backupRoot); + Path bulkOutputPath = BackupUtils.getBulkOutputDir(tmpRestoreOutputDir, BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false); // Delete content if exists if (fs.exists(bulkOutputPath)) { @@ -186,7 +190,9 @@ public void run(String[] backupIds) throws IOException { if (!finishedTables) { // cleanup bulk directories and finish merge // merge MUST be repeated (no need for repair) - cleanupBulkLoadDirs(fs, toPathList(processedTableList)); + if (fs != null) { + cleanupBulkLoadDirs(fs, toPathList(processedTableList)); + } table.finishMergeOperation(); table.finishBackupExclusiveOperation(); throw new IOException("Backup merge operation failed, you should try it again", e); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index c34f6be43b5e..5a6d21dad84f 100644 --- 
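// A minimal sketch, for illustration, of the filesystem-resolution point behind the
// MapReduceBackupMergeJob change above: FileSystem.get(conf) always resolves fs.defaultFS
// (normally the HDFS instance under HBase), whereas Path.getFileSystem(conf) resolves the
// filesystem named by the path's own scheme (s3a://, file://, hdfs://, ...). The backup
// root value below is a made-up example.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class BackupRootFsSketch {
  static FileSystem resolveBackupRootFs(Configuration conf) throws IOException {
    Path backupRootPath = new Path("s3a://my-backup-bucket/hbase-backups");
    // Correct for any backup location: the filesystem that actually hosts the backup root.
    return backupRootPath.getFileSystem(conf);
    // By contrast, FileSystem.get(conf) would return the default (HDFS) filesystem, and a
    // later fs.exists(...) on an s3a:// or file:// path fails with
    // java.lang.IllegalArgumentException: Wrong FS.
  }
}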
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.io.File; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; @@ -124,4 +126,46 @@ public void TestIncBackupMergeRestore() throws Exception { admin.close(); conn.close(); } + + @Test + public void testIncBackupMergeRestoreSeparateFs() throws Exception { + String originalBackupRoot = BACKUP_ROOT_DIR; + // prepare BACKUP_ROOT_DIR on a different filesystem from HBase. + String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString(); + BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString(); + + try (Connection conn = ConnectionFactory.createConnection(conf1)) { + BackupAdminImpl client = new BackupAdminImpl(conn); + List tables = Lists.newArrayList(table1, table2); + + BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + String backupIdFull = client.backupTables(request); + assertTrue(checkSucceeded(backupIdFull)); + + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + String backupIdIncMultiple = client.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); + + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + String backupIdIncMultiple2 = client.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple2)); + + try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) { + String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 }; + // this throws java.lang.IllegalArgumentException: Wrong FS prior to HBASE-28539 + bAdmin.mergeBackups(backups); + } + + assertTrue( + new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdFull).toUri()).exists()); + assertFalse( + new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple).toUri()) + .exists()); + assertTrue( + new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple2).toUri()) + .exists()); + } finally { + BACKUP_ROOT_DIR = originalBackupRoot; + } + } } From a94b78666f529f6731cc79c09b628fd099d177ad Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 6 Jun 2024 22:26:49 +0800 Subject: [PATCH 403/514] HBASE-28636 Add UTs for testing copy/sync table between clusters (#5963) Signed-off-by: Xin Sun --- .../hadoop/hbase/mapreduce/SyncTable.java | 21 +- .../hbase/mapreduce/CopyTableTestBase.java | 279 +++++++ .../hadoop/hbase/mapreduce/TestCopyTable.java | 287 ++------ .../mapreduce/TestCopyTableToPeerCluster.java | 148 ++++ .../hadoop/hbase/mapreduce/TestSyncTable.java | 688 +++++++++--------- 5 files changed, 808 insertions(+), 615 deletions(-) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index c1cf132d0302..146f4ec6511f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -92,9 +92,7 
@@ public SyncTable(Configuration conf) { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), zookeeper); - if ("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))) { - TableMapReduceUtil.initCredentialsForCluster(job, peerConf); - } + TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } public Job createSubmittableJob(String[] args) throws IOException { @@ -172,12 +170,6 @@ public Job createSubmittableJob(String[] args) throws IOException { // would be nice to add an option for bulk load instead } - // Obtain an authentication token, for the specified cluster, on behalf of the current user - if (sourceZkCluster != null) { - Configuration peerConf = - HBaseConfiguration.createClusterConf(job.getConfiguration(), sourceZkCluster); - TableMapReduceUtil.initCredentialsForCluster(job, peerConf); - } return job; } @@ -220,7 +212,6 @@ public static enum Counter { @Override protected void setup(Context context) throws IOException { - Configuration conf = context.getConfiguration(); sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY)); sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null); @@ -292,9 +283,7 @@ protected void map(ImmutableBytesWritable key, Result value, Context context) } } catch (Throwable t) { mapperException = t; - Throwables.propagateIfInstanceOf(t, IOException.class); - Throwables.propagateIfInstanceOf(t, InterruptedException.class); - Throwables.propagate(t); + throw t; } } @@ -693,9 +682,9 @@ protected void cleanup(Context context) throws IOException, InterruptedException // propagate first exception if (mapperException != null) { - Throwables.propagateIfInstanceOf(mapperException, IOException.class); - Throwables.propagateIfInstanceOf(mapperException, InterruptedException.class); - Throwables.propagate(mapperException); + Throwables.throwIfInstanceOf(mapperException, IOException.class); + Throwables.throwIfInstanceOf(mapperException, InterruptedException.class); + Throwables.throwIfUnchecked(mapperException); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java new file mode 100644 index 000000000000..d7648c26406d --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java @@ -0,0 +1,279 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
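// A minimal sketch, for illustration, of the exception re-throw pattern that replaces the
// deprecated Throwables.propagate*() calls in SyncTable above: re-throw declared checked
// types, re-throw unchecked throwables as-is, and only then (if ever) wrap. SyncTable's
// cleanup() simply falls through after throwIfUnchecked; the final wrap here is the
// generic completion of the pattern. The shaded Guava package matches the imports used
// elsewhere in this patch.
import java.io.IOException;
import org.apache.hbase.thirdparty.com.google.common.base.Throwables;

class RethrowSketch {
  static void rethrow(Throwable t) throws IOException, InterruptedException {
    Throwables.throwIfInstanceOf(t, IOException.class);
    Throwables.throwIfInstanceOf(t, InterruptedException.class);
    Throwables.throwIfUnchecked(t);
    throw new RuntimeException(t);
  }
}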
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.mob.MobTestUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Rule; +import org.junit.rules.TestName; + +/** + * Base class for testing CopyTable MR tool. + */ +public abstract class CopyTableTestBase { + + protected static final byte[] ROW1 = Bytes.toBytes("row1"); + protected static final byte[] ROW2 = Bytes.toBytes("row2"); + protected static final String FAMILY_A_STRING = "a"; + protected static final String FAMILY_B_STRING = "b"; + protected static final byte[] FAMILY_A = Bytes.toBytes(FAMILY_A_STRING); + protected static final byte[] FAMILY_B = Bytes.toBytes(FAMILY_B_STRING); + protected static final byte[] QUALIFIER = Bytes.toBytes("q"); + + @Rule + public TestName name = new TestName(); + + protected abstract Table createSourceTable(TableDescriptor desc) throws Exception; + + protected abstract Table createTargetTable(TableDescriptor desc) throws Exception; + + protected abstract void dropSourceTable(TableName tableName) throws Exception; + + protected abstract void dropTargetTable(TableName tableName) throws Exception; + + protected abstract String[] getPeerClusterOptions() throws Exception; + + protected final void loadData(Table t, byte[] family, byte[] column) throws IOException { + for (int i = 0; i < 10; i++) { + byte[] row = Bytes.toBytes("row" + i); + Put p = new Put(row); + p.addColumn(family, column, row); + t.put(p); + } + } + + protected final void verifyRows(Table t, byte[] family, byte[] column) throws IOException { + for (int i = 0; i < 10; i++) { + byte[] row = Bytes.toBytes("row" + i); + Get g = new Get(row).addFamily(family); + Result r = t.get(g); + assertNotNull(r); + assertEquals(1, r.size()); + Cell cell = r.rawCells()[0]; + assertTrue(CellUtil.matchingQualifier(cell, column)); + assertEquals(Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength(), row, 0, row.length), 0); + } + } + + protected final void doCopyTableTest(Configuration conf, boolean bulkload) throws Exception { + TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); + TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + byte[] family = Bytes.toBytes("family"); + byte[] column = Bytes.toBytes("c1"); + TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + + try (Table t1 = createSourceTable(desc1); Table t2 = 
createTargetTable(desc2)) { + // put rows into the first table + loadData(t1, family, column); + + String[] peerClusterOptions = getPeerClusterOptions(); + if (bulkload) { + assertTrue(runCopy(conf, + ArrayUtils.addAll(peerClusterOptions, "--new.name=" + tableName2.getNameAsString(), + "--bulkload", tableName1.getNameAsString()))); + } else { + assertTrue(runCopy(conf, ArrayUtils.addAll(peerClusterOptions, + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString()))); + } + + // verify the data was copied into table 2 + verifyRows(t2, family, column); + } finally { + dropSourceTable(tableName1); + dropTargetTable(tableName2); + } + } + + protected final void doCopyTableTestWithMob(Configuration conf, boolean bulkload) + throws Exception { + TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); + TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + byte[] family = Bytes.toBytes("mob"); + byte[] column = Bytes.toBytes("c1"); + + ColumnFamilyDescriptorBuilder cfd = ColumnFamilyDescriptorBuilder.newBuilder(family); + + cfd.setMobEnabled(true); + cfd.setMobThreshold(5); + TableDescriptor desc1 = + TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); + TableDescriptor desc2 = + TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); + + try (Table t1 = createSourceTable(desc1); Table t2 = createTargetTable(desc2)) { + // put rows into the first table + for (int i = 0; i < 10; i++) { + Put p = new Put(Bytes.toBytes("row" + i)); + p.addColumn(family, column, column); + t1.put(p); + } + + String[] peerClusterOptions = getPeerClusterOptions(); + if (bulkload) { + assertTrue(runCopy(conf, + ArrayUtils.addAll(peerClusterOptions, "--new.name=" + tableName2.getNameAsString(), + "--bulkload", tableName1.getNameAsString()))); + } else { + assertTrue(runCopy(conf, ArrayUtils.addAll(peerClusterOptions, + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString()))); + } + + // verify the data was copied into table 2 + for (int i = 0; i < 10; i++) { + Get g = new Get(Bytes.toBytes("row" + i)); + Result r = t2.get(g); + assertEquals(1, r.size()); + assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], column)); + assertEquals("compare row values between two tables", + t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); + } + + assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(t1), + MobTestUtil.countMobRows(t2)); + assertEquals("compare count of mob row values between two tables", + t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); + assertTrue("The mob row count is 0 but should be > 0", MobTestUtil.countMobRows(t2) > 0); + } finally { + dropSourceTable(tableName1); + dropTargetTable(tableName2); + } + } + + protected final boolean runCopy(Configuration conf, String[] args) throws Exception { + int status = ToolRunner.run(conf, new CopyTable(), args); + return status == 0; + } + + protected final void testStartStopRow(Configuration conf) throws Exception { + final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); + final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + final byte[] family = Bytes.toBytes("family"); + final byte[] column = Bytes.toBytes("c1"); + final byte[] row0 = Bytes.toBytesBinary("\\x01row0"); + final byte[] row1 = Bytes.toBytesBinary("\\x01row1"); + final byte[] row2 = Bytes.toBytesBinary("\\x01row2"); + TableDescriptor desc1 = 
TableDescriptorBuilder.newBuilder(tableName1) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + try (Table t1 = createSourceTable(desc1); Table t2 = createTargetTable(desc2)) { + // put rows into the first table + Put p = new Put(row0); + p.addColumn(family, column, column); + t1.put(p); + p = new Put(row1); + p.addColumn(family, column, column); + t1.put(p); + p = new Put(row2); + p.addColumn(family, column, column); + t1.put(p); + + String[] peerClusterOptions = getPeerClusterOptions(); + assertTrue(runCopy(conf, ArrayUtils.addAll(peerClusterOptions, "--new.name=" + tableName2, + "--startrow=\\x01row1", "--stoprow=\\x01row2", tableName1.getNameAsString()))); + + // verify the data was copied into table 2 + // row1 exist, row0, row2 do not exist + Get g = new Get(row1); + Result r = t2.get(g); + assertEquals(1, r.size()); + assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], column)); + + g = new Get(row0); + r = t2.get(g); + assertEquals(0, r.size()); + + g = new Get(row2); + r = t2.get(g); + assertEquals(0, r.size()); + } finally { + dropSourceTable(tableName1); + dropTargetTable(tableName2); + } + } + + protected final void testRenameFamily(Configuration conf) throws Exception { + TableName sourceTable = TableName.valueOf(name.getMethodName() + "-source"); + TableName targetTable = TableName.valueOf(name.getMethodName() + "-target"); + + TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(sourceTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_A)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_B)).build(); + TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(targetTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_A)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_B)).build(); + + try (Table t = createSourceTable(desc1); Table t2 = createTargetTable(desc2)) { + Put p = new Put(ROW1); + p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11")); + p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12")); + p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13")); + t.put(p); + p = new Put(ROW2); + p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21")); + p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22")); + p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23")); + t.put(p); + + long currentTime = EnvironmentEdgeManager.currentTime(); + String[] args = ArrayUtils.addAll(getPeerClusterOptions(), "--new.name=" + targetTable, + "--families=a:b", "--all.cells", "--starttime=" + (currentTime - 100000), + "--endtime=" + (currentTime + 100000), "--versions=1", sourceTable.getNameAsString()); + assertNull(t2.get(new Get(ROW1)).getRow()); + + assertTrue(runCopy(conf, args)); + + assertNotNull(t2.get(new Get(ROW1)).getRow()); + Result res = t2.get(new Get(ROW1)); + byte[] b1 = res.getValue(FAMILY_B, QUALIFIER); + assertEquals("Data13", Bytes.toString(b1)); + assertNotNull(t2.get(new Get(ROW2)).getRow()); + res = t2.get(new Get(ROW2)); + b1 = res.getValue(FAMILY_A, QUALIFIER); + // Data from the family of B is not copied + assertNull(b1); + } finally { + dropSourceTable(sourceTable); + dropTargetTable(targetTable); + } + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 08dbe77b5c7e..11e377b199f4 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -18,37 +18,26 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.mob.MobTestUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; -import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; @@ -60,20 +49,13 @@ * Basic test for the CopyTable M/R tool */ @Category({ MapReduceTests.class, LargeTests.class }) -public class TestCopyTable { +public class TestCopyTable extends CopyTableTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCopyTable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final byte[] ROW1 = Bytes.toBytes("row1"); - private static final byte[] ROW2 = Bytes.toBytes("row2"); - private static final String FAMILY_A_STRING = "a"; - private static final String FAMILY_B_STRING = "b"; - private static final byte[] FAMILY_A = Bytes.toBytes(FAMILY_A_STRING); - private static final byte[] FAMILY_B = Bytes.toBytes(FAMILY_B_STRING); - private static final byte[] QUALIFIER = Bytes.toBytes("q"); @Rule public TestName name = new TestName(); @@ -88,94 +70,29 @@ public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - private void doCopyTableTest(boolean bulkload) throws Exception { - final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); - final byte[] FAMILY = Bytes.toBytes("family"); - final byte[] COLUMN1 = Bytes.toBytes("c1"); - - try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { - // put rows into the first table - loadData(t1, FAMILY, COLUMN1); - - CopyTable copy = new CopyTable(); - int code; - if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, - new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", - tableName1.getNameAsString() }); - } 
else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { - "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); - } - assertEquals("copy job failed", 0, code); - - // verify the data was copied into table 2 - verifyRows(t2, FAMILY, COLUMN1); - } finally { - TEST_UTIL.deleteTable(tableName1); - TEST_UTIL.deleteTable(tableName2); - } + @Override + protected Table createSourceTable(TableDescriptor desc) throws Exception { + return TEST_UTIL.createTable(desc, null); } - private void doCopyTableTestWithMob(boolean bulkload) throws Exception { - final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); - final byte[] FAMILY = Bytes.toBytes("mob"); - final byte[] COLUMN1 = Bytes.toBytes("c1"); - - ColumnFamilyDescriptorBuilder cfd = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); - - cfd.setMobEnabled(true); - cfd.setMobThreshold(5); - TableDescriptor desc1 = - TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); - TableDescriptor desc2 = - TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); - - try (Table t1 = TEST_UTIL.createTable(desc1, null); - Table t2 = TEST_UTIL.createTable(desc2, null);) { - - // put rows into the first table - for (int i = 0; i < 10; i++) { - Put p = new Put(Bytes.toBytes("row" + i)); - p.addColumn(FAMILY, COLUMN1, COLUMN1); - t1.put(p); - } - - CopyTable copy = new CopyTable(); + @Override + protected Table createTargetTable(TableDescriptor desc) throws Exception { + return TEST_UTIL.createTable(desc, null); + } - int code; - if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, - new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", - tableName1.getNameAsString() }); - } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { - "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); - } - assertEquals("copy job failed", 0, code); + @Override + protected void dropSourceTable(TableName tableName) throws Exception { + TEST_UTIL.deleteTable(tableName); + } - // verify the data was copied into table 2 - for (int i = 0; i < 10; i++) { - Get g = new Get(Bytes.toBytes("row" + i)); - Result r = t2.get(g); - assertEquals(1, r.size()); - assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); - assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); - } + @Override + protected void dropTargetTable(TableName tableName) throws Exception { + TEST_UTIL.deleteTable(tableName); + } - assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(t1), - MobTestUtil.countMobRows(t2)); - assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); - assertTrue("The mob row count is 0 but should be > 0", MobTestUtil.countMobRows(t2) > 0); - } finally { - TEST_UTIL.deleteTable(tableName1); - TEST_UTIL.deleteTable(tableName2); - } + @Override + protected String[] getPeerClusterOptions() throws Exception { + return new String[0]; } /** @@ -183,7 +100,7 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { */ @Test public void testCopyTable() throws Exception { - doCopyTableTest(false); + 
doCopyTableTest(TEST_UTIL.getConfiguration(), false); } /** @@ -191,7 +108,7 @@ public void testCopyTable() throws Exception { */ @Test public void testCopyTableWithBulkload() throws Exception { - doCopyTableTest(true); + doCopyTableTest(TEST_UTIL.getConfiguration(), true); } /** @@ -199,7 +116,7 @@ public void testCopyTableWithBulkload() throws Exception { */ @Test public void testCopyTableWithMob() throws Exception { - doCopyTableTestWithMob(false); + doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), false); } /** @@ -207,58 +124,12 @@ public void testCopyTableWithMob() throws Exception { */ @Test public void testCopyTableWithBulkloadWithMob() throws Exception { - doCopyTableTestWithMob(true); + doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), true); } @Test public void testStartStopRow() throws Exception { - final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); - final byte[] FAMILY = Bytes.toBytes("family"); - final byte[] COLUMN1 = Bytes.toBytes("c1"); - final byte[] row0 = Bytes.toBytesBinary("\\x01row0"); - final byte[] row1 = Bytes.toBytesBinary("\\x01row1"); - final byte[] row2 = Bytes.toBytesBinary("\\x01row2"); - - try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { - - // put rows into the first table - Put p = new Put(row0); - p.addColumn(FAMILY, COLUMN1, COLUMN1); - t1.put(p); - p = new Put(row1); - p.addColumn(FAMILY, COLUMN1, COLUMN1); - t1.put(p); - p = new Put(row2); - p.addColumn(FAMILY, COLUMN1, COLUMN1); - t1.put(p); - - CopyTable copy = new CopyTable(); - assertEquals(0, - ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, - new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", "--stoprow=\\x01row2", - tableName1.getNameAsString() })); - - // verify the data was copied into table 2 - // row1 exist, row0, row2 do not exist - Get g = new Get(row1); - Result r = t2.get(g); - assertEquals(1, r.size()); - assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); - - g = new Get(row0); - r = t2.get(g); - assertEquals(0, r.size()); - - g = new Get(row2); - r = t2.get(g); - assertEquals(0, r.size()); - - } finally { - TEST_UTIL.deleteTable(tableName1); - TEST_UTIL.deleteTable(tableName2); - } + testStartStopRow(TEST_UTIL.getConfiguration()); } /** @@ -266,42 +137,7 @@ public void testStartStopRow() throws Exception { */ @Test public void testRenameFamily() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName() + "source"); - final TableName targetTable = TableName.valueOf(name.getMethodName() + "-target"); - - byte[][] families = { FAMILY_A, FAMILY_B }; - - Table t = TEST_UTIL.createTable(sourceTable, families); - Table t2 = TEST_UTIL.createTable(targetTable, families); - Put p = new Put(ROW1); - p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11")); - p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12")); - p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13")); - t.put(p); - p = new Put(ROW2); - p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21")); - p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22")); - p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23")); - t.put(p); - - long currentTime = EnvironmentEdgeManager.currentTime(); - String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells", - "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), - 
"--versions=1", sourceTable.getNameAsString() }; - assertNull(t2.get(new Get(ROW1)).getRow()); - - assertTrue(runCopy(args)); - - assertNotNull(t2.get(new Get(ROW1)).getRow()); - Result res = t2.get(new Get(ROW1)); - byte[] b1 = res.getValue(FAMILY_B, QUALIFIER); - assertEquals("Data13", Bytes.toString(b1)); - assertNotNull(t2.get(new Get(ROW2)).getRow()); - res = t2.get(new Get(ROW2)); - b1 = res.getValue(FAMILY_A, QUALIFIER); - // Data from the family of B is not copied - assertNull(b1); - + testRenameFamily(TEST_UTIL.getConfiguration()); } /** @@ -331,35 +167,6 @@ public void testMainMethod() throws Exception { assertTrue(data.toString().contains("Usage:")); } - private boolean runCopy(String[] args) throws Exception { - int status = - ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), args); - return status == 0; - } - - private void loadData(Table t, byte[] family, byte[] column) throws IOException { - for (int i = 0; i < 10; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put p = new Put(row); - p.addColumn(family, column, row); - t.put(p); - } - } - - private void verifyRows(Table t, byte[] family, byte[] column) throws IOException { - for (int i = 0; i < 10; i++) { - byte[] row = Bytes.toBytes("row" + i); - Get g = new Get(row).addFamily(family); - Result r = t.get(g); - Assert.assertNotNull(r); - Assert.assertEquals(1, r.size()); - Cell cell = r.rawCells()[0]; - Assert.assertTrue(CellUtil.matchingQualifier(cell, column)); - Assert.assertEquals(Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength(), row, 0, row.length), 0); - } - } - private Table createTable(TableName tableName, byte[] family, boolean isMob) throws IOException { if (isMob) { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(family) @@ -376,20 +183,26 @@ private void testCopyTableBySnapshot(String tablePrefix, boolean bulkLoad, boole throws Exception { TableName table1 = TableName.valueOf(tablePrefix + 1); TableName table2 = TableName.valueOf(tablePrefix + 2); - Table t1 = createTable(table1, FAMILY_A, isMob); - Table t2 = createTable(table2, FAMILY_A, isMob); - loadData(t1, FAMILY_A, Bytes.toBytes("qualifier")); String snapshot = tablePrefix + "_snapshot"; - TEST_UTIL.getAdmin().snapshot(snapshot, table1); - boolean success; - if (bulkLoad) { - success = - runCopy(new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); - } else { - success = runCopy(new String[] { "--snapshot", "--new.name=" + table2, snapshot }); + try (Table t1 = createTable(table1, FAMILY_A, isMob); + Table t2 = createTable(table2, FAMILY_A, isMob)) { + loadData(t1, FAMILY_A, Bytes.toBytes("qualifier")); + TEST_UTIL.getAdmin().snapshot(snapshot, table1); + boolean success; + if (bulkLoad) { + success = runCopy(TEST_UTIL.getConfiguration(), + new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }); + } else { + success = runCopy(TEST_UTIL.getConfiguration(), + new String[] { "--snapshot", "--new.name=" + table2, snapshot }); + } + assertTrue(success); + verifyRows(t2, FAMILY_A, Bytes.toBytes("qualifier")); + } finally { + TEST_UTIL.getAdmin().deleteSnapshot(snapshot); + TEST_UTIL.deleteTable(table1); + TEST_UTIL.deleteTable(table2); } - Assert.assertTrue(success); - verifyRows(t2, FAMILY_A, Bytes.toBytes("qualifier")); } @Test @@ -412,19 +225,15 @@ public void testLoadingSnapshotAndBulkLoadToMobTable() throws Exception { testCopyTableBySnapshot("testLoadingSnapshotAndBulkLoadToMobTable", true, true); } - @Test - public 
void testLoadingSnapshotToRemoteCluster() throws Exception { - Assert.assertFalse(runCopy( - new String[] { "--snapshot", "--peerAdr=hbase://remoteHBase", "sourceSnapshotName" })); - } - @Test public void testLoadingSnapshotWithoutSnapshotName() throws Exception { - Assert.assertFalse(runCopy(new String[] { "--snapshot", "--peerAdr=hbase://remoteHBase" })); + assertFalse(runCopy(TEST_UTIL.getConfiguration(), new String[] { "--snapshot" })); } @Test public void testLoadingSnapshotWithoutDestTable() throws Exception { - Assert.assertFalse(runCopy(new String[] { "--snapshot", "sourceSnapshotName" })); + assertFalse( + runCopy(TEST_UTIL.getConfiguration(), new String[] { "--snapshot", "sourceSnapshotName" })); } + } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java new file mode 100644 index 000000000000..f483e00c9177 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertFalse; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test CopyTable between clusters + */ +@Category({ MapReduceTests.class, LargeTests.class }) +public class TestCopyTableToPeerCluster extends CopyTableTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCopyTableToPeerCluster.class); + + private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); + + private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); + + @BeforeClass + public static void beforeClass() throws Exception { + UTIL1.startMiniCluster(3); + UTIL2.startMiniCluster(3); + } + + @AfterClass + public static void afterClass() throws Exception { + UTIL1.shutdownMiniCluster(); + UTIL2.shutdownMiniCluster(); + } + + @Override + protected Table createSourceTable(TableDescriptor desc) throws Exception { + return UTIL1.createTable(desc, null); + } + + @Override + protected Table createTargetTable(TableDescriptor desc) throws Exception { + return UTIL2.createTable(desc, null); + } + + @Override + protected void dropSourceTable(TableName tableName) throws Exception { + UTIL1.deleteTable(tableName); + } + + @Override + protected void dropTargetTable(TableName tableName) throws Exception { + UTIL2.deleteTable(tableName); + } + + @Override + protected String[] getPeerClusterOptions() throws Exception { + return new String[] { "--peer.adr=" + UTIL2.getClusterKey() }; + } + + /** + * Simple end-to-end test + */ + @Test + public void testCopyTable() throws Exception { + doCopyTableTest(UTIL1.getConfiguration(), false); + } + + /** + * Simple end-to-end test on table with MOB + */ + @Test + public void testCopyTableWithMob() throws Exception { + doCopyTableTestWithMob(UTIL1.getConfiguration(), false); + } + + @Test + public void testStartStopRow() throws Exception { + testStartStopRow(UTIL1.getConfiguration()); + } + + /** + * Test copy of table from sourceTable to targetTable all rows from family a + */ + @Test + public void testRenameFamily() throws Exception { + testRenameFamily(UTIL1.getConfiguration()); + } + + @Test + public void testBulkLoadNotSupported() throws Exception { + TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); + TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); + Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { + assertFalse(runCopy(UTIL1.getConfiguration(), + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", + "--peer.adr=" + UTIL2.getClusterKey(), tableName1.getNameAsString() })); + } finally { + UTIL1.deleteTable(tableName1); + UTIL2.deleteTable(tableName2); + } + } + + @Test + public void testSnapshotNotSupported() throws Exception { + TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); + TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + String snapshot = 
tableName1.getNameAsString() + "_snapshot"; + try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); + Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { + UTIL1.getAdmin().snapshot(snapshot, tableName1); + assertFalse(runCopy(UTIL1.getConfiguration(), + new String[] { "--new.name=" + tableName2.getNameAsString(), "--snapshot", + "--peer.adr=" + UTIL2.getClusterKey(), snapshot })); + } finally { + UTIL1.getAdmin().deleteSnapshot(snapshot); + UTIL1.deleteTable(tableName1); + UTIL2.deleteTable(tableName2); + } + + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index ca2dbdb0f671..d775f256ef12 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -18,9 +18,13 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.Arrays; +import java.util.function.BooleanSupplier; import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -35,11 +39,11 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counters; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; @@ -49,12 +53,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; - /** * Basic test for the SyncTable M/R tool */ -@Category(LargeTests.class) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestSyncTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -62,20 +64,23 @@ public class TestSyncTable { private static final Logger LOG = LoggerFactory.getLogger(TestSyncTable.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); + + private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); @Rule public TestName name = new TestName(); @BeforeClass public static void beforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + UTIL1.startMiniCluster(3); + UTIL2.startMiniCluster(3); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.cleanupDataTestDirOnTestFS(); - TEST_UTIL.shutdownMiniCluster(); + UTIL2.shutdownMiniCluster(); + UTIL1.shutdownMiniCluster(); } private static byte[][] generateSplits(int numRows, int numRegions) { @@ -86,16 +91,17 @@ private static byte[][] generateSplits(int numRows, int numRegions) { return splitRows; } - @Test - public void testSyncTable() throws Exception { + private void testSyncTable(HBaseTestingUtil source, HBaseTestingUtil target, String... 
options) + throws Exception { final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTable"); + Path testDir = source.getDataTestDirOnTestFS(name.getMethodName()); - writeTestData(sourceTableName, targetTableName); - hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir); - assertEqualTables(90, sourceTableName, targetTableName, false); + writeTestData(source, sourceTableName, target, targetTableName); + hashSourceTable(source, sourceTableName, testDir); + Counters syncCounters = + syncTables(target.getConfiguration(), sourceTableName, targetTableName, testDir, options); + assertEqualTables(90, source, sourceTableName, target, targetTableName, false); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); @@ -104,21 +110,37 @@ public void testSyncTable() throws Exception { assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); - TEST_UTIL.deleteTable(sourceTableName); - TEST_UTIL.deleteTable(targetTableName); + source.deleteTable(sourceTableName); + target.deleteTable(targetTableName); + } + + @Test + public void testSyncTable() throws Exception { + testSyncTable(UTIL1, UTIL1); + } + + @Test + public void testSyncTableToPeerCluster() throws Exception { + testSyncTable(UTIL1, UTIL2, "--sourcezkcluster=" + UTIL1.getClusterKey()); + } + + @Test + public void testSyncTableFromSourceToPeerCluster() throws Exception { + testSyncTable(UTIL2, UTIL1, "--sourcezkcluster=" + UTIL2.getClusterKey(), + "--targetzkcluster=" + UTIL1.getClusterKey()); } @Test public void testSyncTableDoDeletesFalse() throws Exception { final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoDeletesFalse"); + Path testDir = UTIL1.getDataTestDirOnTestFS(name.getMethodName()); - writeTestData(sourceTableName, targetTableName); - hashSourceTable(sourceTableName, testDir); - Counters syncCounters = - syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false"); - assertTargetDoDeletesFalse(100, sourceTableName, targetTableName); + writeTestData(UTIL1, sourceTableName, UTIL1, targetTableName); + hashSourceTable(UTIL1, sourceTableName, testDir); + Counters syncCounters = syncTables(UTIL1.getConfiguration(), sourceTableName, targetTableName, + testDir, "--doDeletes=false"); + assertTargetDoDeletesFalse(100, UTIL1, sourceTableName, UTIL1, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); @@ -127,20 +149,21 @@ public void testSyncTableDoDeletesFalse() throws Exception { assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); - TEST_UTIL.deleteTable(sourceTableName); - TEST_UTIL.deleteTable(targetTableName); + UTIL1.deleteTable(sourceTableName); + UTIL1.deleteTable(targetTableName); } @Test public void testSyncTableDoPutsFalse() throws Exception { 
final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoPutsFalse"); + Path testDir = UTIL2.getDataTestDirOnTestFS(name.getMethodName()); - writeTestData(sourceTableName, targetTableName); - hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false"); - assertTargetDoPutsFalse(70, sourceTableName, targetTableName); + writeTestData(UTIL2, sourceTableName, UTIL2, targetTableName); + hashSourceTable(UTIL2, sourceTableName, testDir); + Counters syncCounters = syncTables(UTIL2.getConfiguration(), sourceTableName, targetTableName, + testDir, "--doPuts=false"); + assertTargetDoPutsFalse(70, UTIL2, sourceTableName, UTIL2, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); @@ -149,21 +172,21 @@ public void testSyncTableDoPutsFalse() throws Exception { assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); - TEST_UTIL.deleteTable(sourceTableName); - TEST_UTIL.deleteTable(targetTableName); + UTIL2.deleteTable(sourceTableName); + UTIL2.deleteTable(targetTableName); } @Test public void testSyncTableIgnoreTimestampsTrue() throws Exception { final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableIgnoreTimestampsTrue"); + Path testDir = UTIL1.getDataTestDirOnTestFS(name.getMethodName()); long current = EnvironmentEdgeManager.currentTime(); - writeTestData(sourceTableName, targetTableName, current - 1000, current); - hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); - Counters syncCounters = - syncTables(sourceTableName, targetTableName, testDir, "--ignoreTimestamps=true"); - assertEqualTables(90, sourceTableName, targetTableName, true); + writeTestData(UTIL1, sourceTableName, UTIL2, targetTableName, current - 1000, current); + hashSourceTable(UTIL1, sourceTableName, testDir, "--ignoreTimestamps=true"); + Counters syncCounters = syncTables(UTIL2.getConfiguration(), sourceTableName, targetTableName, + testDir, "--ignoreTimestamps=true", "--sourcezkcluster=" + UTIL1.getClusterKey()); + assertEqualTables(90, UTIL1, sourceTableName, UTIL2, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); @@ -172,256 +195,202 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { assertEquals(30, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); - TEST_UTIL.deleteTable(sourceTableName); - TEST_UTIL.deleteTable(targetTableName); + UTIL1.deleteTable(sourceTableName); + UTIL2.deleteTable(targetTableName); } - private void assertEqualTables(int expectedRows, TableName sourceTableName, - TableName targetTableName, boolean ignoreTimestamps) throws Exception { - Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); - Table targetTable = 
TEST_UTIL.getConnection().getTable(targetTableName); - - ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); - ResultScanner targetScanner = targetTable.getScanner(new Scan()); - - for (int i = 0; i < expectedRows; i++) { - Result sourceRow = sourceScanner.next(); - Result targetRow = targetScanner.next(); - - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); - - if (sourceRow == null) { - Assert.fail("Expected " + expectedRows + " source rows but only found " + i); - } - if (targetRow == null) { - Assert.fail("Expected " + expectedRows + " target rows but only found " + i); - } - Cell[] sourceCells = sourceRow.rawCells(); - Cell[] targetCells = targetRow.rawCells(); - if (sourceCells.length != targetCells.length) { - LOG.debug("Source cells: " + Arrays.toString(sourceCells)); - LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length - + " cells in source table but " + targetCells.length + " cells in target table"); - } - for (int j = 0; j < sourceCells.length; j++) { - Cell sourceCell = sourceCells[j]; - Cell targetCell = targetCells[j]; - try { - if (!CellUtil.matchingRows(sourceCell, targetCell)) { - Assert.fail("Rows don't match"); - } - if (!CellUtil.matchingFamily(sourceCell, targetCell)) { - Assert.fail("Families don't match"); - } - if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { - Assert.fail("Qualifiers don't match"); - } - if (!ignoreTimestamps && !CellUtil.matchingTimestamp(sourceCell, targetCell)) { - Assert.fail("Timestamps don't match"); - } - if (!CellUtil.matchingValue(sourceCell, targetCell)) { - Assert.fail("Values don't match"); - } - } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); - Throwables.propagate(t); - } - } - } - Result sourceRow = sourceScanner.next(); - if (sourceRow != null) { - Assert.fail("Source table has more than " + expectedRows + " rows. Next row: " - + Bytes.toInt(sourceRow.getRow())); + private void assertCellEquals(Cell sourceCell, Cell targetCell, BooleanSupplier checkTimestamp) { + assertTrue("Rows don't match, source: " + sourceCell + ", target: " + targetCell, + CellUtil.matchingRows(sourceCell, targetCell)); + assertTrue("Families don't match, source: " + sourceCell + ", target: " + targetCell, + CellUtil.matchingFamily(sourceCell, targetCell)); + assertTrue("Qualifiers don't match, source: " + sourceCell + ", target: " + targetCell, + CellUtil.matchingQualifier(sourceCell, targetCell)); + if (checkTimestamp.getAsBoolean()) { + assertTrue("Timestamps don't match, source: " + sourceCell + ", target: " + targetCell, + CellUtil.matchingTimestamp(sourceCell, targetCell)); } - Result targetRow = targetScanner.next(); - if (targetRow != null) { - Assert.fail("Target table has more than " + expectedRows + " rows. 
Next row: " - + Bytes.toInt(targetRow.getRow())); - } - sourceScanner.close(); - targetScanner.close(); - sourceTable.close(); - targetTable.close(); + assertTrue("Values don't match, source: " + sourceCell + ", target: " + targetCell, + CellUtil.matchingValue(sourceCell, targetCell)); } - private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { - Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); - Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); - - ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); - ResultScanner targetScanner = targetTable.getScanner(new Scan()); - Result targetRow = targetScanner.next(); - Result sourceRow = sourceScanner.next(); - int rowsCount = 0; - while (targetRow != null) { - rowsCount++; - // only compares values for existing rows, skipping rows existing on - // target only that were not deleted given --doDeletes=false - if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { - targetRow = targetScanner.next(); - continue; - } - - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); - - Cell[] sourceCells = sourceRow.rawCells(); - Cell[] targetCells = targetRow.rawCells(); - int targetRowKey = Bytes.toInt(targetRow.getRow()); - if (targetRowKey >= 70 && targetRowKey < 80) { - if (sourceCells.length == targetCells.length) { - LOG.debug("Source cells: " + Arrays.toString(sourceCells)); - LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert - .fail("Row " + targetRowKey + " should have more cells in " + "target than in source"); + private void assertEqualTables(int expectedRows, HBaseTestingUtil sourceCluster, + TableName sourceTableName, HBaseTestingUtil targetCluster, TableName targetTableName, + boolean ignoreTimestamps) throws Exception { + try (Table sourceTable = sourceCluster.getConnection().getTable(sourceTableName); + Table targetTable = targetCluster.getConnection().getTable(targetTableName); + ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); + ResultScanner targetScanner = targetTable.getScanner(new Scan())) { + for (int i = 0; i < expectedRows; i++) { + Result sourceRow = sourceScanner.next(); + Result targetRow = targetScanner.next(); + + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); + + if (sourceRow == null) { + fail("Expected " + expectedRows + " source rows but only found " + i); } - - } else { + if (targetRow == null) { + fail("Expected " + expectedRows + " target rows but only found " + i); + } + Cell[] sourceCells = sourceRow.rawCells(); + Cell[] targetCells = targetRow.rawCells(); if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + " cells in source table but " + targetCells.length + " cells in target table"); } - } - for (int j = 0; j < sourceCells.length; j++) { - Cell sourceCell = sourceCells[j]; - Cell targetCell = targetCells[j]; - try { - if (!CellUtil.matchingRows(sourceCell, targetCell)) { - Assert.fail("Rows don't match"); - } - if (!CellUtil.matchingFamily(sourceCell, targetCell)) { - Assert.fail("Families don't match"); - } - if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { - Assert.fail("Qualifiers don't match"); - } - if (targetRowKey < 80 && targetRowKey >= 90) { - if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { - Assert.fail("Timestamps don't match"); - } - } - if (!CellUtil.matchingValue(sourceCell, targetCell)) { - Assert.fail("Values don't match"); - } - } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); - Throwables.propagate(t); + for (int j = 0; j < sourceCells.length; j++) { + Cell sourceCell = sourceCells[j]; + Cell targetCell = targetCells[j]; + assertCellEquals(sourceCell, targetCell, () -> !ignoreTimestamps); } } - targetRow = targetScanner.next(); - sourceRow = sourceScanner.next(); + Result sourceRow = sourceScanner.next(); + if (sourceRow != null) { + fail("Source table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(sourceRow.getRow())); + } + Result targetRow = targetScanner.next(); + if (targetRow != null) { + fail("Target table has more than " + expectedRows + " rows. 
Next row: " + + Bytes.toInt(targetRow.getRow())); + } } - assertEquals("Target expected rows does not match.", expectedRows, rowsCount); - sourceScanner.close(); - targetScanner.close(); - sourceTable.close(); - targetTable.close(); } - private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName, - TableName targetTableName) throws Exception { - Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); - Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); - - ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); - ResultScanner targetScanner = targetTable.getScanner(new Scan()); - Result targetRow = targetScanner.next(); - Result sourceRow = sourceScanner.next(); - int rowsCount = 0; - - while (targetRow != null) { - // only compares values for existing rows, skipping rows existing on - // source only that were not added to target given --doPuts=false - if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { - sourceRow = sourceScanner.next(); - continue; - } + private void assertTargetDoDeletesFalse(int expectedRows, HBaseTestingUtil sourceCluster, + TableName sourceTableName, HBaseTestingUtil targetCluster, TableName targetTableName) + throws Exception { + try (Table sourceTable = sourceCluster.getConnection().getTable(sourceTableName); + Table targetTable = targetCluster.getConnection().getTable(targetTableName); - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) - + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) - + " cells:" + targetRow); - - LOG.debug("rowsCount: " + rowsCount); - - Cell[] sourceCells = sourceRow.rawCells(); - Cell[] targetCells = targetRow.rawCells(); - int targetRowKey = Bytes.toInt(targetRow.getRow()); - if (targetRowKey >= 40 && targetRowKey < 60) { - LOG.debug("Source cells: " + Arrays.toString(sourceCells)); - LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("There shouldn't exist any rows between 40 and 60, since " - + "Puts are disabled and Deletes are enabled."); - } else if (targetRowKey >= 60 && targetRowKey < 70) { - if (sourceCells.length == targetCells.length) { - LOG.debug("Source cells: " + Arrays.toString(sourceCells)); - LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail( - "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); + ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); + ResultScanner targetScanner = targetTable.getScanner(new Scan())) { + Result targetRow = targetScanner.next(); + Result sourceRow = sourceScanner.next(); + int rowsCount = 0; + while (targetRow != null) { + rowsCount++; + // only compares values for existing rows, skipping rows existing on + // target only that were not deleted given --doDeletes=false + if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { + targetRow = targetScanner.next(); + continue; } - } else if (targetRowKey >= 80 && targetRowKey < 90) { - LOG.debug("Source cells: " + Arrays.toString(sourceCells)); - LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("There should be no rows between 80 and 90 on target, as " - + "these had different timestamps and should had been deleted."); - } else if (targetRowKey >= 90 && targetRowKey < 100) { - for (int j = 0; j < sourceCells.length; j++) { - Cell sourceCell = sourceCells[j]; - Cell targetCell = targetCells[j]; - if 
(CellUtil.matchingValue(sourceCell, targetCell)) { - Assert.fail("Cells values should not match for rows between " - + "90 and 100. Target row id: " + (Bytes.toInt(targetRow.getRow()))); + + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); + + Cell[] sourceCells = sourceRow.rawCells(); + Cell[] targetCells = targetRow.rawCells(); + int targetRowKey = Bytes.toInt(targetRow.getRow()); + if (targetRowKey >= 70 && targetRowKey < 80) { + if (sourceCells.length == targetCells.length) { + LOG.debug("Source cells: " + Arrays.toString(sourceCells)); + LOG.debug("Target cells: " + Arrays.toString(targetCells)); + fail("Row " + targetRowKey + " should have more cells in " + "target than in source"); + } + + } else { + if (sourceCells.length != targetCells.length) { + LOG.debug("Source cells: " + Arrays.toString(sourceCells)); + LOG.debug("Target cells: " + Arrays.toString(targetCells)); + fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } } - } else { for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell = sourceCells[j]; Cell targetCell = targetCells[j]; - try { - if (!CellUtil.matchingRows(sourceCell, targetCell)) { - Assert.fail("Rows don't match"); - } - if (!CellUtil.matchingFamily(sourceCell, targetCell)) { - Assert.fail("Families don't match"); - } - if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { - Assert.fail("Qualifiers don't match"); - } - if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { - Assert.fail("Timestamps don't match"); - } - if (!CellUtil.matchingValue(sourceCell, targetCell)) { - Assert.fail("Values don't match"); + assertCellEquals(sourceCell, targetCell, () -> targetRowKey < 80 && targetRowKey >= 90); + } + targetRow = targetScanner.next(); + sourceRow = sourceScanner.next(); + } + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); + } + } + + private void assertTargetDoPutsFalse(int expectedRows, HBaseTestingUtil sourceCluster, + TableName sourceTableName, HBaseTestingUtil targetCluster, TableName targetTableName) + throws Exception { + try (Table sourceTable = sourceCluster.getConnection().getTable(sourceTableName); + Table targetTable = targetCluster.getConnection().getTable(targetTableName); + ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); + ResultScanner targetScanner = targetTable.getScanner(new Scan())) { + Result targetRow = targetScanner.next(); + Result sourceRow = sourceScanner.next(); + int rowsCount = 0; + + while (targetRow != null) { + // only compares values for existing rows, skipping rows existing on + // source only that were not added to target given --doPuts=false + if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { + sourceRow = sourceScanner.next(); + continue; + } + + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + + " cells:" + sourceRow); + LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + + " cells:" + targetRow); + + LOG.debug("rowsCount: " + rowsCount); + + Cell[] sourceCells = sourceRow.rawCells(); + Cell[] targetCells = targetRow.rawCells(); + int targetRowKey = Bytes.toInt(targetRow.getRow()); + if (targetRowKey >= 40 && targetRowKey < 60) { + LOG.debug("Source cells: " + Arrays.toString(sourceCells)); + LOG.debug("Target cells: " + Arrays.toString(targetCells)); + fail("There shouldn't exist any rows between 40 and 60, since " + + "Puts are disabled and Deletes are enabled."); + } else if (targetRowKey >= 60 && targetRowKey < 70) { + if (sourceCells.length == targetCells.length) { + LOG.debug("Source cells: " + Arrays.toString(sourceCells)); + LOG.debug("Target cells: " + Arrays.toString(targetCells)); + fail( + "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); + } + } else if (targetRowKey >= 80 && targetRowKey < 90) { + LOG.debug("Source cells: " + Arrays.toString(sourceCells)); + LOG.debug("Target cells: " + Arrays.toString(targetCells)); + fail("There should be no rows between 80 and 90 on target, as " + + "these had different timestamps and should had been deleted."); + } else if (targetRowKey >= 90 && targetRowKey < 100) { + for (int j = 0; j < sourceCells.length; j++) { + Cell sourceCell = sourceCells[j]; + Cell targetCell = targetCells[j]; + if (CellUtil.matchingValue(sourceCell, targetCell)) { + fail("Cells values should not match for rows between " + "90 and 100. Target row id: " + + Bytes.toInt(targetRow.getRow())); } - } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); - Throwables.propagate(t); + } + } else { + for (int j = 0; j < sourceCells.length; j++) { + Cell sourceCell = sourceCells[j]; + Cell targetCell = targetCells[j]; + assertCellEquals(sourceCell, targetCell, () -> true); } } + rowsCount++; + targetRow = targetScanner.next(); + sourceRow = sourceScanner.next(); } - rowsCount++; - targetRow = targetScanner.next(); - sourceRow = sourceScanner.next(); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); } - assertEquals("Target expected rows does not match.", expectedRows, rowsCount); - sourceScanner.close(); - targetScanner.close(); - sourceTable.close(); - targetTable.close(); } - private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir, - String... options) throws Exception { - SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); + private Counters syncTables(Configuration conf, TableName sourceTableName, + TableName targetTableName, Path testDir, String... options) throws Exception { + SyncTable syncTable = new SyncTable(conf); String[] args = Arrays.copyOf(options, options.length + 3); args[options.length] = testDir.toString(); args[options.length + 1] = sourceTableName.getNameAsString(); @@ -433,12 +402,12 @@ private Counters syncTables(TableName sourceTableName, TableName targetTableName return syncTable.counters; } - private void hashSourceTable(TableName sourceTableName, Path testDir, String... options) - throws Exception { + private void hashSourceTable(HBaseTestingUtil sourceCluster, TableName sourceTableName, + Path testDir, String... 
options) throws Exception { int numHashFiles = 3; long batchSize = 100; // should be 2 batches per region int scanBatch = 1; - HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); + HashTable hashTable = new HashTable(sourceCluster.getConfiguration()); String[] args = Arrays.copyOf(options, options.length + 5); args[options.length] = "--batchsize=" + batchSize; args[options.length + 1] = "--numhashfiles=" + numHashFiles; @@ -448,7 +417,7 @@ private void hashSourceTable(TableName sourceTableName, Path testDir, String... int code = hashTable.run(args); assertEquals("hash table job failed", 0, code); - FileSystem fs = TEST_UTIL.getTestFileSystem(); + FileSystem fs = sourceCluster.getTestFileSystem(); HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir); assertEquals(sourceTableName.getNameAsString(), tableHash.tableName); @@ -459,8 +428,9 @@ private void hashSourceTable(TableName sourceTableName, Path testDir, String... LOG.info("Hash table completed"); } - private void writeTestData(TableName sourceTableName, TableName targetTableName, - long... timestamps) throws Exception { + private void writeTestData(HBaseTestingUtil sourceCluster, TableName sourceTableName, + HBaseTestingUtil targetCluster, TableName targetTableName, long... timestamps) + throws Exception { final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); @@ -476,102 +446,100 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, timestamps = new long[] { current, current }; } - Table sourceTable = - TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); - - Table targetTable = - TEST_UTIL.createTable(targetTableName, family, generateSplits(numRows, targetRegions)); - - int rowIndex = 0; - // a bunch of identical rows - for (; rowIndex < 40; rowIndex++) { - Put sourcePut = new Put(Bytes.toBytes(rowIndex)); - sourcePut.addColumn(family, column1, timestamps[0], value1); - sourcePut.addColumn(family, column2, timestamps[0], value2); - sourceTable.put(sourcePut); - - Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1], value1); - targetPut.addColumn(family, column2, timestamps[1], value2); - targetTable.put(targetPut); - } - // some rows only in the source table - // ROWSWITHDIFFS: 10 - // TARGETMISSINGROWS: 10 - // TARGETMISSINGCELLS: 20 - for (; rowIndex < 50; rowIndex++) { - Put put = new Put(Bytes.toBytes(rowIndex)); - put.addColumn(family, column1, timestamps[0], value1); - put.addColumn(family, column2, timestamps[0], value2); - sourceTable.put(put); - } - // some rows only in the target table - // ROWSWITHDIFFS: 10 - // SOURCEMISSINGROWS: 10 - // SOURCEMISSINGCELLS: 20 - for (; rowIndex < 60; rowIndex++) { - Put put = new Put(Bytes.toBytes(rowIndex)); - put.addColumn(family, column1, timestamps[1], value1); - put.addColumn(family, column2, timestamps[1], value2); - targetTable.put(put); - } - // some rows with 1 missing cell in target table - // ROWSWITHDIFFS: 10 - // TARGETMISSINGCELLS: 10 - for (; rowIndex < 70; rowIndex++) { - Put sourcePut = new Put(Bytes.toBytes(rowIndex)); - sourcePut.addColumn(family, column1, timestamps[0], value1); - sourcePut.addColumn(family, column2, timestamps[0], value2); - sourceTable.put(sourcePut); - - Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1], value1); - targetTable.put(targetPut); - } - // some rows 
with 1 missing cell in source table - // ROWSWITHDIFFS: 10 - // SOURCEMISSINGCELLS: 10 - for (; rowIndex < 80; rowIndex++) { - Put sourcePut = new Put(Bytes.toBytes(rowIndex)); - sourcePut.addColumn(family, column1, timestamps[0], value1); - sourceTable.put(sourcePut); - - Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1], value1); - targetPut.addColumn(family, column2, timestamps[1], value2); - targetTable.put(targetPut); - } - // some rows differing only in timestamp - // ROWSWITHDIFFS: 10 - // SOURCEMISSINGCELLS: 20 - // TARGETMISSINGCELLS: 20 - for (; rowIndex < 90; rowIndex++) { - Put sourcePut = new Put(Bytes.toBytes(rowIndex)); - sourcePut.addColumn(family, column1, timestamps[0], column1); - sourcePut.addColumn(family, column2, timestamps[0], value2); - sourceTable.put(sourcePut); - - Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1] + 1, column1); - targetPut.addColumn(family, column2, timestamps[1] - 1, value2); - targetTable.put(targetPut); - } - // some rows with different values - // ROWSWITHDIFFS: 10 - // DIFFERENTCELLVALUES: 20 - for (; rowIndex < numRows; rowIndex++) { - Put sourcePut = new Put(Bytes.toBytes(rowIndex)); - sourcePut.addColumn(family, column1, timestamps[0], value1); - sourcePut.addColumn(family, column2, timestamps[0], value2); - sourceTable.put(sourcePut); - - Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1], value3); - targetPut.addColumn(family, column2, timestamps[1], value3); - targetTable.put(targetPut); + try ( + Table sourceTable = + sourceCluster.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); + Table targetTable = targetCluster.createTable(targetTableName, family, + generateSplits(numRows, targetRegions))) { + + int rowIndex = 0; + // a bunch of identical rows + for (; rowIndex < 40; rowIndex++) { + Put sourcePut = new Put(Bytes.toBytes(rowIndex)); + sourcePut.addColumn(family, column1, timestamps[0], value1); + sourcePut.addColumn(family, column2, timestamps[0], value2); + sourceTable.put(sourcePut); + + Put targetPut = new Put(Bytes.toBytes(rowIndex)); + targetPut.addColumn(family, column1, timestamps[1], value1); + targetPut.addColumn(family, column2, timestamps[1], value2); + targetTable.put(targetPut); + } + // some rows only in the source table + // ROWSWITHDIFFS: 10 + // TARGETMISSINGROWS: 10 + // TARGETMISSINGCELLS: 20 + for (; rowIndex < 50; rowIndex++) { + Put put = new Put(Bytes.toBytes(rowIndex)); + put.addColumn(family, column1, timestamps[0], value1); + put.addColumn(family, column2, timestamps[0], value2); + sourceTable.put(put); + } + // some rows only in the target table + // ROWSWITHDIFFS: 10 + // SOURCEMISSINGROWS: 10 + // SOURCEMISSINGCELLS: 20 + for (; rowIndex < 60; rowIndex++) { + Put put = new Put(Bytes.toBytes(rowIndex)); + put.addColumn(family, column1, timestamps[1], value1); + put.addColumn(family, column2, timestamps[1], value2); + targetTable.put(put); + } + // some rows with 1 missing cell in target table + // ROWSWITHDIFFS: 10 + // TARGETMISSINGCELLS: 10 + for (; rowIndex < 70; rowIndex++) { + Put sourcePut = new Put(Bytes.toBytes(rowIndex)); + sourcePut.addColumn(family, column1, timestamps[0], value1); + sourcePut.addColumn(family, column2, timestamps[0], value2); + sourceTable.put(sourcePut); + + Put targetPut = new Put(Bytes.toBytes(rowIndex)); + targetPut.addColumn(family, column1, timestamps[1], value1); + 
targetTable.put(targetPut); + } + // some rows with 1 missing cell in source table + // ROWSWITHDIFFS: 10 + // SOURCEMISSINGCELLS: 10 + for (; rowIndex < 80; rowIndex++) { + Put sourcePut = new Put(Bytes.toBytes(rowIndex)); + sourcePut.addColumn(family, column1, timestamps[0], value1); + sourceTable.put(sourcePut); + + Put targetPut = new Put(Bytes.toBytes(rowIndex)); + targetPut.addColumn(family, column1, timestamps[1], value1); + targetPut.addColumn(family, column2, timestamps[1], value2); + targetTable.put(targetPut); + } + // some rows differing only in timestamp + // ROWSWITHDIFFS: 10 + // SOURCEMISSINGCELLS: 20 + // TARGETMISSINGCELLS: 20 + for (; rowIndex < 90; rowIndex++) { + Put sourcePut = new Put(Bytes.toBytes(rowIndex)); + sourcePut.addColumn(family, column1, timestamps[0], column1); + sourcePut.addColumn(family, column2, timestamps[0], value2); + sourceTable.put(sourcePut); + + Put targetPut = new Put(Bytes.toBytes(rowIndex)); + targetPut.addColumn(family, column1, timestamps[1] + 1, column1); + targetPut.addColumn(family, column2, timestamps[1] - 1, value2); + targetTable.put(targetPut); + } + // some rows with different values + // ROWSWITHDIFFS: 10 + // DIFFERENTCELLVALUES: 20 + for (; rowIndex < numRows; rowIndex++) { + Put sourcePut = new Put(Bytes.toBytes(rowIndex)); + sourcePut.addColumn(family, column1, timestamps[0], value1); + sourcePut.addColumn(family, column2, timestamps[0], value2); + sourceTable.put(sourcePut); + + Put targetPut = new Put(Bytes.toBytes(rowIndex)); + targetPut.addColumn(family, column1, timestamps[1], value3); + targetPut.addColumn(family, column2, timestamps[1], value3); + targetTable.put(targetPut); + } } - - sourceTable.close(); - targetTable.close(); } } From 092ce0dca0b64155f38871cc734244e5022b45e9 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Fri, 7 Jun 2024 08:33:29 +0200 Subject: [PATCH 404/514] HBASE-28540 Cache Results in org.apache.hadoop.hbase.rest.client.RemoteHTable.Scanner (#5846) Signed-off-by: Duo Zhang --- .../hbase/rest/client/RemoteHTable.java | 32 +++++++++++++++---- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index bb80996b3194..53b5742ca93d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -23,6 +23,7 @@ import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.List; @@ -505,6 +506,8 @@ public TableDescriptor getDescriptor() throws IOException { class Scanner implements ResultScanner { String uri; + private Result[] cachedResults; + private int nextCachedResultsRow = 0; public Scanner(Scan scan) throws IOException { ScannerModel model; @@ -540,11 +543,8 @@ public Scanner(Scan scan) throws IOException { throw new IOException("scan request timed out"); } - @Override - public Result[] next(int nbRows) throws IOException { + public Result[] nextBatch() throws IOException { StringBuilder sb = new StringBuilder(uri); - sb.append("?n="); - sb.append(nbRows); for (int i = 0; i < maxRetries; i++) { Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF); int code = response.getCode(); @@ -570,13 +570,31 @@ public Result[] next(int nbRows) throws IOException { throw new 
IOException("scanner.next request timed out"); } + private boolean updateCachedResults() throws IOException { + if (cachedResults == null || nextCachedResultsRow >= cachedResults.length) { + nextCachedResultsRow = 0; + cachedResults = nextBatch(); + } + return !(cachedResults == null || cachedResults.length < 1); + } + + @Override + public Result[] next(int nbRows) throws IOException { + if (!updateCachedResults()) { + return null; + } + int endIndex = Math.min(cachedResults.length, nextCachedResultsRow + nbRows); + Result[] chunk = Arrays.copyOfRange(cachedResults, nextCachedResultsRow, endIndex); + nextCachedResultsRow = endIndex; + return chunk; + } + @Override public Result next() throws IOException { - Result[] results = next(1); - if (results == null || results.length < 1) { + if (!updateCachedResults()) { return null; } - return results[0]; + return cachedResults[nextCachedResultsRow++]; } class Iter implements Iterator { From f136f0ace8a328f4a8489a66cb8397975b5777dd Mon Sep 17 00:00:00 2001 From: Junegunn Choi Date: Sat, 8 Jun 2024 12:29:19 +0900 Subject: [PATCH 405/514] HBASE-28549 Make shell commands support column qualifiers with colons (#5849) Signed-off-by: Duo Zhang --- hbase-shell/src/main/ruby/hbase/table.rb | 50 +++++++++++-------- hbase-shell/src/test/ruby/hbase/table_test.rb | 36 +++++++++++++ 2 files changed, 65 insertions(+), 21 deletions(-) diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index 5192c63aa6d3..d265ca09556b 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -135,7 +135,7 @@ def close # Put a cell 'value' at specified table/row/column def _put_internal(row, column, value, timestamp = nil, args = {}) p = org.apache.hadoop.hbase.client.Put.new(row.to_s.to_java_bytes) - family, qualifier = parse_column_name(column) + family, qualifier = split_column_name(column) if args.any? attributes = args[ATTRIBUTES] set_attributes(p, attributes) if attributes @@ -188,14 +188,14 @@ def _createdelete_internal(row, column = nil, end if column != "" if column && all_version - family, qualifier = parse_column_name(column) + family, qualifier = split_column_name(column) if qualifier d.addColumns(family, qualifier, timestamp) else d.addFamily(family, timestamp) end elsif column && !all_version - family, qualifier = parse_column_name(column) + family, qualifier = split_column_name(column) if qualifier d.addColumn(family, qualifier, timestamp) else @@ -273,7 +273,7 @@ def _incr_internal(row, column, value = nil, args = {}) value = 1 if value.is_a?(Hash) value ||= 1 incr = org.apache.hadoop.hbase.client.Increment.new(row.to_s.to_java_bytes) - family, qualifier = parse_column_name(column) + family, qualifier = split_column_name(column) if args.any? attributes = args[ATTRIBUTES] visibility = args[VISIBILITY] @@ -296,7 +296,7 @@ def _incr_internal(row, column, value = nil, args = {}) # appends the value atomically def _append_internal(row, column, value, args = {}) append = org.apache.hadoop.hbase.client.Append.new(row.to_s.to_java_bytes) - family, qualifier = parse_column_name(column) + family, qualifier = split_column_name(column) if args.any? 
attributes = args[ATTRIBUTES] visibility = args[VISIBILITY] @@ -491,7 +491,7 @@ def _get_internal(row, *args) #---------------------------------------------------------------------------------------------- # Fetches and decodes a counter value from hbase def _get_counter_internal(row, column) - family, qualifier = parse_column_name(column.to_s) + family, qualifier = split_column_name(column.to_s) # Format get request get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes) get.addColumn(family, qualifier) @@ -833,16 +833,16 @@ def convert_bytes(bytes, converter_class = nil, converter_method = nil) eval(converter_class).method(converter_method).call(bytes) end - def convert_bytes_with_position(bytes, offset, len, converter_class, converter_method) - # Avoid nil - converter_class ||= 'org.apache.hadoop.hbase.util.Bytes' - converter_method ||= 'toStringBinary' - eval(converter_class).method(converter_method).call(bytes, offset, len) - end - # store the information designating what part of a column should be printed, and how ColumnFormatSpec = Struct.new(:family, :qualifier, :converter) + # Use this instead of parse_column_name if the name cannot contain a converter + private def split_column_name(column) + # NOTE: We use 'take(2)' instead of 'to_a' to avoid type coercion of the nested byte arrays. + # https://github.com/jruby/jruby/blob/9.3.13.0/core/src/main/java/org/jruby/java/proxies/ArrayJavaProxy.java#L484-L488 + org.apache.hadoop.hbase.CellUtil.parseColumn(column.to_java_bytes).take(2) + end + ## # Parse the column specification for formatting used by shell commands like :scan # @@ -856,21 +856,29 @@ def convert_bytes_with_position(bytes, offset, len, converter_class, converter_m # @param [String] column # @return [ColumnFormatSpec] family, qualifier, and converter as Java bytes private def parse_column_format_spec(column) - split = org.apache.hadoop.hbase.CellUtil.parseColumn(column.to_java_bytes) - family = split[0] - qualifier = nil converter = nil - if split.length > 1 - parts = org.apache.hadoop.hbase.CellUtil.parseColumn(split[1]) - qualifier = parts[0] - if parts.length > 1 - converter = parts[1] + family, qualifier = split_column_name(column) + if qualifier + delim = org.apache.hadoop.hbase.KeyValue.getDelimiterInReverse( + qualifier, 0, qualifier.length, org.apache.hadoop.hbase.KeyValue::COLUMN_FAMILY_DELIMITER + ) + if delim >= 0 + prefix, suffix = qualifier[0...delim], qualifier[delim+1..-1] + if converter?(suffix.to_s) + qualifier = prefix + converter = suffix + end end end ColumnFormatSpec.new(family, qualifier, converter) end + # Check if the expression can be a converter + private def converter?(expr) + expr =~ /^c\(.+\)\..+/ || Bytes.respond_to?(expr) + end + private def set_column_converter(family, qualifier, converter) @converters["#{String.from_java_bytes(family)}:#{String.from_java_bytes(qualifier)}"] = String.from_java_bytes(converter) end diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb b/hbase-shell/src/test/ruby/hbase/table_test.rb index 8ed6f663dbd4..7b9a15fb841a 100644 --- a/hbase-shell/src/test/ruby/hbase/table_test.rb +++ b/hbase-shell/src/test/ruby/hbase/table_test.rb @@ -242,6 +242,42 @@ def teardown define_test "get_counter should return nil for non-existent counters" do assert_nil(@test_table._get_counter_internal(12345, 'x:qqqq')) end + + define_test "should work with qualifiers with colons" do + rowkey = "123" + + # Two columns with multiple colons in their qualifiers with the same prefix + col1 = "x:foo:bar:c1" + col2 = 
"x:foo:bar:c2" + + # Make sure that no data is present + @test_table.deleteall(rowkey) + + # Put two columns with colons in their qualifiers + @test_table.put(rowkey, col1, org.apache.hadoop.hbase.util.Bytes.toBytes(1)) + @test_table.put(rowkey, col2, org.apache.hadoop.hbase.util.Bytes.toBytes(2)) + assert_equal(2, @test_table._get_internal(rowkey).length) + + # Increment the second column by 10 => 2 + 10 => 12 + @test_table.incr(rowkey, col2, 10) + assert_equal(12, @test_table._get_counter_internal(rowkey, col2)) + + # Check the counter value using toLong converter + %w[:toLong :c(org.apache.hadoop.hbase.util.Bytes).toLong].each do |suffix| + res = @test_table._get_internal(rowkey, { COLUMNS => [col2 + suffix] }) + assert_not_nil(res) + assert_kind_of(Hash, res) + assert_not_nil(/value=12/.match(res[col2])) + end + + # Delete the first column + @test_table.delete(rowkey, col1) + assert_equal(1, @test_table._get_internal(rowkey).length) + + # Append twice to the deleted column + @test_table.append(rowkey, col1, '123') + assert_equal("123123", @test_table._append_internal(rowkey, col1, '123')) + end end # Complex data management methods tests From c5d3f652f25ada20e741b693d6a5c3559de861d7 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 11 Jun 2024 06:08:18 +0200 Subject: [PATCH 406/514] HBASE-28649 Wrong properties are used to set up SSL for REST Client Kerberos authenticator (#5975) Signed-off-by: Ankit Singhal --- .../main/java/org/apache/hadoop/hbase/rest/client/Client.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 620497d08ba7..5c89dea48e74 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -523,8 +523,8 @@ private Configuration setupTrustStoreForHadoop(KeyStore trustStore) Configuration sslConf = new Configuration(); // Type is the Java default, we use the same JVM to read this back - sslConf.set("ssl.client.keystore.location", trustStoreFile.getAbsolutePath()); - sslConf.set("ssl.client.keystore.password", password); + sslConf.set("ssl.client.truststore.location", trustStoreFile.getAbsolutePath()); + sslConf.set("ssl.client.truststore.password", password); return sslConf; } From 580361820f6d678ef8044dc2b1a8b00b1b30c5ee Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 11 Jun 2024 22:03:35 +0800 Subject: [PATCH 407/514] HBASE-28651 Reformat the javadoc for CellChunkMap (#5977) Signed-off-by: Yi Mei --- .../hbase/regionserver/CellChunkMap.java | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java index 8a5f28c2870d..e4bfcf05ab2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java @@ -27,18 +27,28 @@ /** * CellChunkMap is an array of serialized representations of Cell (pointing to Chunks with full Cell - * data) and can be allocated both off-heap and on-heap. CellChunkMap is a byte array (chunk) - * holding all that is needed to access a Cell, which is actually saved on another deeper chunk. 
Per - * Cell we have a reference to this deeper byte array B (chunk ID, integer), offset in bytes in B - * (integer), length in bytes in B (integer) and seqID of the cell (long). In order to save - * reference to byte array we use the Chunk's ID given by ChunkCreator. The CellChunkMap memory - * layout on chunk A relevant to a deeper byte array B, holding the actual cell data: < header > - * <--------------- first Cell -----------------> <-- second Cell ... + * data) and can be allocated both off-heap and on-heap. + *

    + * CellChunkMap is a byte array (chunk) holding all that is needed to access a Cell, which is + * actually saved on another deeper chunk. Per Cell we have a reference to this deeper byte array B + * (chunk ID, integer), offset in bytes in B (integer), length in bytes in B (integer) and seqID of + * the cell (long). In order to save reference to byte array we use the Chunk's ID given by + * ChunkCreator. + *

    + * The CellChunkMap memory layout on chunk A relevant to a deeper byte array B, holding the actual + * cell data: + * + *

    + * <pre>
    + * < header > <---------------     first Cell     -----------------> <-- second Cell ...
      * --------------------------------------------------------------------------------------- ...
    - * integer | integer | integer | integer | long | 4 bytes | 4 bytes | 4 bytes | 4 bytes | 8 bytes |
    - * ChunkID | chunkID of | offset in B | length of | sequence | ... of this | chunk B with | where
    - * Cell's | Cell's | ID of | chunk A | Cell data | data starts | data in B | the Cell |
    + *  integer  | integer      | integer      | integer     | long     |
    + *  4 bytes  | 4 bytes      | 4 bytes      | 4 bytes     | 8 bytes  |
    + *  ChunkID  | chunkID of   | offset in B  | length of   | sequence |          ...
    + *  of this  | chunk B with | where Cell's | Cell's      | ID of    |
    + *  chunk A  | Cell data    | data starts  | data in B   | the Cell |
      * --------------------------------------------------------------------------------------- ...
    + * </pre>
    */ @InterfaceAudience.Private public class CellChunkMap extends CellFlatMap { @@ -71,7 +81,7 @@ public CellChunkMap(Comparator comparator, Chunk[] chunks, int min } } - /* + /** * To be used by base (CellFlatMap) class only to create a sub-CellFlatMap Should be used only to * create only CellChunkMap from CellChunkMap */ From 91b351264dec4ecf08103577c6bfa51da1197c39 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 11 Jun 2024 20:00:44 +0200 Subject: [PATCH 408/514] HBASE-28646 Use Streams to unmarshall protobuf REST data (#5974) Signed-off-by: Duo Zhang --- .../hbase/rest/ProtobufMessageHandler.java | 33 ++++++++++++++----- .../apache/hadoop/hbase/rest/RestUtil.java | 17 ++++++++++ .../hadoop/hbase/rest/model/CellModel.java | 7 ++-- .../hadoop/hbase/rest/model/CellSetModel.java | 7 ++-- .../rest/model/NamespacesInstanceModel.java | 6 ++-- .../hbase/rest/model/NamespacesModel.java | 6 ++-- .../hadoop/hbase/rest/model/RowModel.java | 3 +- .../hadoop/hbase/rest/model/ScannerModel.java | 7 ++-- .../rest/model/StorageClusterStatusModel.java | 7 ++-- .../hbase/rest/model/TableInfoModel.java | 7 ++-- .../hbase/rest/model/TableListModel.java | 7 ++-- .../hbase/rest/model/TableSchemaModel.java | 7 ++-- .../hadoop/hbase/rest/model/VersionModel.java | 7 ++-- .../consumer/ProtobufMessageBodyConsumer.java | 16 +-------- 14 files changed, 84 insertions(+), 53 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index 962e5dfae860..1b84dc7c93da 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -18,9 +18,11 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -47,7 +49,7 @@ default void writeProtobufOutput(OutputStream os) throws IOException { } /** - * Returns the protobuf represention of the model in a byte array Use + * Returns the protobuf represention of the model in a byte array. Use * {@link org.apache.hadoop.hbase.rest.ProtobufMessageHandler#writeProtobufOutput(OutputStream)} * for better performance * @return the protobuf encoded object in a byte array @@ -63,15 +65,28 @@ default byte[] createProtobufOutput() { Message messageFromObject(); /** - * Initialize the model from a protobuf representation. + * Initialize the model from a protobuf representation. Use + * {@link org.apache.hadoop.hbase.rest.ProtobufMessageHandler#getObjectFromMessage(InputStream)} + * for better performance * @param message the raw bytes of the protobuf message * @return reference to self for convenience */ - // TODO implement proper stream handling for unmarshalling. - // Using byte array here lets us use ProtobufUtil.mergeFrom in the implementations to - // avoid the CodedOutputStream size limitation, but is slow - // and memory intensive. We could use the ProtobufUtil.mergeFrom() variant that takes - // an inputStream and sets the size limit to maxInt. - // This would help both on the client side, and when processing large Puts on the server. 
- ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; + default ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + final CodedInputStream codedInput = CodedInputStream.newInstance(message); + codedInput.setSizeLimit(message.length); + return getObjectFromMessage(codedInput); + } + + /** + * Initialize the model from a protobuf representation. + * @param is InputStream providing the protobuf message + * @return reference to self for convenience + */ + default ProtobufMessageHandler getObjectFromMessage(InputStream is) throws IOException { + final CodedInputStream codedInput = CodedInputStream.newInstance(is); + codedInput.setSizeLimit(Integer.MAX_VALUE); + return getObjectFromMessage(codedInput); + } + + ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java index 5f884c510d6d..ffd5fb208d7f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hbase.rest; +import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.rest.model.CellModel; import org.apache.hadoop.hbase.rest.model.RowModel; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + @InterfaceAudience.Private public final class RestUtil { @@ -45,4 +49,17 @@ public static RowModel createRowModelFromResult(Result r) { } return rowModel; } + + /** + * Merges the object from codedInput, then calls checkLastTagWas. This is based on + * ProtobufUtil.mergeFrom, but we have already taken care of setSizeLimit() before calling, so + * only the checkLastTagWas() call is retained. 
+ * @param builder protobuf object builder + * @param codedInput encoded object data + */ + public static void mergeFrom(Message.Builder builder, CodedInputStream codedInput) + throws IOException { + builder.mergeFrom(codedInput); + codedInput.checkLastTagWas(0); + } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index 3d8806b7dc00..00475518103d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -34,12 +34,13 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; /** @@ -218,9 +219,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { Cell.Builder builder = Cell.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); setColumn(builder.getColumn().toByteArray()); setValue(builder.getData().toByteArray()); if (builder.hasTimestamp()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index 8486be2762fe..2a7462742545 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -29,12 +29,13 @@ import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellSetMessage.CellSet; @@ -139,9 +140,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { CellSet.Builder builder = CellSet.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); for (CellSet.Row row : builder.getRowsList()) { RowModel rowModel = new RowModel(row.getKey().toByteArray()); for (Cell cell : row.getValuesList()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index 78f647203851..ee5fed13b57f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -29,8 +29,10 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; @@ -157,9 +159,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); - builder.mergeFrom(message); + RestUtil.mergeFrom(builder, cis); List properties = builder.getPropsList(); for (NamespaceProperties.Property property : properties) { addProperty(property.getKey(), property.getValue()); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index 90e4f6560a51..e13e56333227 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -29,8 +29,10 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces; @@ -104,9 +106,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { Namespaces.Builder builder = Namespaces.newBuilder(); - builder.mergeFrom(message); + RestUtil.mergeFrom(builder, cis); namespaces = builder.getNamespaceList(); return this; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index e200dfbc1f35..3b5e274d442f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** @@ -187,7 +188,7 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler 
getObjectFromMessage(CodedInputStream is) throws IOException { // there is no standalone row protobuf message throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 9bd740c1b3b9..a18a9ba427ab 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -67,17 +67,18 @@ import org.apache.hadoop.hbase.filter.ValueFilter; import org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ScannerMessage.Scanner; /** @@ -925,9 +926,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { Scanner.Builder builder = Scanner.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); if (builder.hasStartRow()) { startRow = builder.getStartRow().toByteArray(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index c9370cad901b..ae3671a509f7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -27,13 +27,14 @@ import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; /** @@ -713,9 +714,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + 
RestUtil.mergeFrom(builder, cis); if (builder.hasRegions()) { regions = builder.getRegions(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index 43b131fcb701..9656f0c82145 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -25,12 +25,13 @@ import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableInfoMessage.TableInfo; /** @@ -140,9 +141,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { TableInfo.Builder builder = TableInfo.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); setName(builder.getName()); for (TableInfo.Region region : builder.getRegionsList()) { add( diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index 63b2e809279c..7a3430e03d48 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -24,11 +24,12 @@ import javax.xml.bind.annotation.XmlElementRef; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList; /** @@ -101,9 +102,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { TableList.Builder builder = TableList.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); for (String table : builder.getNameList()) { this.add(new TableModel(table)); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index f2a8c4c7060d..3e28afdfbaef 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -39,12 +39,13 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableSchemaMessage.TableSchema; @@ -287,9 +288,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { TableSchema.Builder builder = TableSchema.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); this.setName(builder.getName()); for (TableSchema.Attribute attr : builder.getAttrsList()) { this.addAttribute(attr.getName(), attr.getValue()); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java index 65eca57ac5a3..359211ef5e28 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -24,12 +24,13 @@ import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.RESTServlet; +import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version; /** @@ -174,9 +175,9 @@ public Message messageFromObject() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + public ProtobufMessageHandler getObjectFromMessage(CodedInputStream cis) throws IOException { Version.Builder builder = Version.newBuilder(); - ProtobufUtil.mergeFrom(builder, message); + RestUtil.mergeFrom(builder, cis); if (builder.hasRestVersion()) { restVersion = builder.getRestVersion(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java index 7c3f6f8ea401..340962730e7b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.rest.provider.consumer; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; @@ -59,23 +58,10 @@ public ProtobufMessageHandler readFrom(Class type, Type ProtobufMessageHandler obj = null; try { obj = type.getDeclaredConstructor().newInstance(); - 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); - byte[] buffer = new byte[4096]; - int read; - do { - read = inputStream.read(buffer, 0, buffer.length); - if (read > 0) { - baos.write(buffer, 0, read); - } - } while (read > 0); - if (LOG.isTraceEnabled()) { - LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + inputStream); - } - obj = obj.getObjectFromMessage(baos.toByteArray()); + return obj.getObjectFromMessage(inputStream); } catch (InstantiationException | NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { throw new WebApplicationException(e); } - return obj; } } From 317ad3c2918c69f119fb5839292decd73c3dba7c Mon Sep 17 00:00:00 2001 From: KhyatiVaghamshi Date: Tue, 11 Jun 2024 12:15:52 -0700 Subject: [PATCH 409/514] HBASE-28049 RSProcedureDispatcher to log the request details during retries (#5973) Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../hadoop/hbase/master/procedure/RSProcedureDispatcher.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index abc9c575a62e..dfd8c9587b27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -314,8 +314,8 @@ private boolean scheduleForRetry(IOException e) { LOG.warn("{} is aborted or stopped, for safety we still need to" + " wait until it is fully dead, try={}", serverName, numberOfAttemptsSoFar); } else { - LOG.warn("request to {} failed due to {}, try={}, retrying...", serverName, e.toString(), - numberOfAttemptsSoFar); + LOG.warn("request to {} failed due to {}, try={}, retrying... 
, request params: {}", + serverName, e.toString(), numberOfAttemptsSoFar, request.build()); } numberOfAttemptsSoFar++; // Add some backoff here as the attempts rise otherwise if a stuck condition, will fill logs From 9bdee6d43f1ebada7f1f9aa47405c67fae55a4f4 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 12 Jun 2024 18:25:01 +0800 Subject: [PATCH 410/514] HBASE-28565 Make map reduce jobs accept connection uri when specifying peer cluster (#5972) Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/mapreduce/CopyTable.java | 260 ++++++++++-------- .../hadoop/hbase/mapreduce/SyncTable.java | 92 ++++++- .../hbase/mapreduce/TableMapReduceUtil.java | 157 ++++++++--- .../hbase/mapreduce/TableOutputFormat.java | 52 +++- .../replication/VerifyReplication.java | 55 +++- ...va => CopyTableToPeerClusterTestBase.java} | 32 +-- ...tCopyTableToPeerClusterWithClusterKey.java | 38 +++ .../TestCopyTableToPeerClusterWithRpcUri.java | 38 +++ .../TestCopyTableToPeerClusterWithZkUri.java | 38 +++ .../hadoop/hbase/mapreduce/TestSyncTable.java | 10 +- .../mapreduce/TestTableMapReduceUtil.java | 48 +++- .../TestVerifyReplicationAdjunct.java | 22 +- ...TestVerifyReplicationRpcConnectionUri.java | 38 +++ .../TestVerifyReplicationZkClusterKey.java | 38 +++ .../TestVerifyReplicationZkConnectionUri.java | 38 +++ ...on.java => VerifyReplicationTestBase.java} | 41 +-- 16 files changed, 742 insertions(+), 255 deletions(-) rename hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/{TestCopyTableToPeerCluster.java => CopyTableToPeerClusterTestBase.java} (76%) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java rename hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/{TestVerifyReplication.java => VerifyReplicationTestBase.java} (94%) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index 8564c105331e..8a26972c4810 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.util.HashMap; import java.util.Map; import java.util.UUID; @@ -63,6 +65,11 @@ public class CopyTable extends Configured implements Tool { String startRow = null; String stopRow = null; String dstTableName = null; + URI peerUri = null; + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. Use {@link #peerUri} instead. 
+ */ + @Deprecated String peerAddress = null; String families = null; boolean allCells = false; @@ -89,7 +96,7 @@ private Path generateUniqTempDir(boolean withDirCreated) throws IOException { return newDir; } - private void initCopyTableMapperReducerJob(Job job, Scan scan) throws IOException { + private void initCopyTableMapperJob(Job job, Scan scan) throws IOException { Class mapper = bulkload ? CellImporter.class : Importer.class; if (readingSnapshot) { TableMapReduceUtil.initTableSnapshotMapperJob(snapshot, scan, mapper, null, null, job, true, @@ -166,7 +173,7 @@ public Job createSubmittableJob(String[] args) throws IOException { job.setNumReduceTasks(0); if (bulkload) { - initCopyTableMapperReducerJob(job, scan); + initCopyTableMapperJob(job, scan); // We need to split the inputs by destination tables so that output of Map can be bulk-loaded. TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName)); @@ -180,8 +187,15 @@ public Job createSubmittableJob(String[] args) throws IOException { admin.getDescriptor((TableName.valueOf(dstTableName)))); } } else { - initCopyTableMapperReducerJob(job, scan); - TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress); + initCopyTableMapperJob(job, scan); + if (peerUri != null) { + TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerUri); + } else if (peerAddress != null) { + TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress); + } else { + TableMapReduceUtil.initTableReducerJob(dstTableName, null, job); + } + } return job; @@ -195,7 +209,7 @@ private static void printUsage(final String errorMsg) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " - + "[--new.name=NEW] [--peer.adr=ADR] "); + + "[--new.name=NEW] [--peer.uri=URI|--peer.adr=ADR] "); System.err.println(); System.err.println("Options:"); System.err.println(" rs.class hbase.regionserver.class of the peer cluster"); @@ -208,9 +222,12 @@ private static void printUsage(final String errorMsg) { System.err.println(" endtime end of the time range. Ignored if no starttime specified."); System.err.println(" versions number of cell versions to copy"); System.err.println(" new.name new table's name"); + System.err.println(" peer.uri The URI of the peer cluster"); System.err.println(" peer.adr Address of the peer cluster given in the format"); System.err.println(" hbase.zookeeper.quorum:hbase.zookeeper.client" + ".port:zookeeper.znode.parent"); + System.err.println(" Do not take effect if peer.uri is specified"); + System.err.println(" Deprecated, please use peer.uri instead"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); @@ -247,144 +264,149 @@ private boolean doCommandLine(final String[] args) { printUsage(null); return false; } - try { - for (int i = 0; i < args.length; i++) { - String cmd = args[i]; - if (cmd.equals("-h") || cmd.startsWith("--h")) { - printUsage(null); - return false; - } - - final String startRowArgKey = "--startrow="; - if (cmd.startsWith(startRowArgKey)) { - startRow = cmd.substring(startRowArgKey.length()); - continue; - } - - final String stopRowArgKey = "--stoprow="; - if (cmd.startsWith(stopRowArgKey)) { - stopRow = cmd.substring(stopRowArgKey.length()); - continue; - } - - final String startTimeArgKey = "--starttime="; - if (cmd.startsWith(startTimeArgKey)) { - startTime = Long.parseLong(cmd.substring(startTimeArgKey.length())); - continue; - } - - final String endTimeArgKey = "--endtime="; - if (cmd.startsWith(endTimeArgKey)) { - endTime = Long.parseLong(cmd.substring(endTimeArgKey.length())); - continue; - } - - final String batchArgKey = "--batch="; - if (cmd.startsWith(batchArgKey)) { - batch = Integer.parseInt(cmd.substring(batchArgKey.length())); - continue; - } - - final String cacheRowArgKey = "--cacheRow="; - if (cmd.startsWith(cacheRowArgKey)) { - cacheRow = Integer.parseInt(cmd.substring(cacheRowArgKey.length())); - continue; - } + for (int i = 0; i < args.length; i++) { + String cmd = args[i]; + if (cmd.equals("-h") || cmd.startsWith("--h")) { + printUsage(null); + return false; + } - final String versionsArgKey = "--versions="; - if (cmd.startsWith(versionsArgKey)) { - versions = Integer.parseInt(cmd.substring(versionsArgKey.length())); - continue; - } + final String startRowArgKey = "--startrow="; + if (cmd.startsWith(startRowArgKey)) { + startRow = cmd.substring(startRowArgKey.length()); + continue; + } - final String newNameArgKey = "--new.name="; - if (cmd.startsWith(newNameArgKey)) { - dstTableName = cmd.substring(newNameArgKey.length()); - continue; - } + final String stopRowArgKey = "--stoprow="; + if (cmd.startsWith(stopRowArgKey)) { + stopRow = cmd.substring(stopRowArgKey.length()); + continue; + } - final String peerAdrArgKey = "--peer.adr="; - if (cmd.startsWith(peerAdrArgKey)) { - peerAddress = cmd.substring(peerAdrArgKey.length()); - continue; - } + final String startTimeArgKey = "--starttime="; + if (cmd.startsWith(startTimeArgKey)) { + startTime = Long.parseLong(cmd.substring(startTimeArgKey.length())); + continue; + } - final String familiesArgKey = "--families="; - if (cmd.startsWith(familiesArgKey)) { - families = cmd.substring(familiesArgKey.length()); - continue; - } + final String endTimeArgKey = "--endtime="; + if (cmd.startsWith(endTimeArgKey)) { + endTime = Long.parseLong(cmd.substring(endTimeArgKey.length())); + continue; + } - if (cmd.startsWith("--all.cells")) { - allCells = true; - continue; - } + final String batchArgKey = "--batch="; + if (cmd.startsWith(batchArgKey)) { + batch = Integer.parseInt(cmd.substring(batchArgKey.length())); + continue; + } - if (cmd.startsWith("--bulkload")) { - bulkload = true; - continue; - } + final String cacheRowArgKey = "--cacheRow="; + if (cmd.startsWith(cacheRowArgKey)) { + cacheRow = Integer.parseInt(cmd.substring(cacheRowArgKey.length())); + continue; + } - if (cmd.startsWith("--shuffle")) { - shuffle = true; - continue; - } + final String versionsArgKey = "--versions="; + if (cmd.startsWith(versionsArgKey)) { + versions = Integer.parseInt(cmd.substring(versionsArgKey.length())); + continue; + } - if 
(cmd.startsWith("--snapshot")) { - readingSnapshot = true; - continue; - } + final String newNameArgKey = "--new.name="; + if (cmd.startsWith(newNameArgKey)) { + dstTableName = cmd.substring(newNameArgKey.length()); + continue; + } - if (i == args.length - 1) { - if (readingSnapshot) { - snapshot = cmd; - } else { - tableName = cmd; - } - } else { - printUsage("Invalid argument '" + cmd + "'"); + final String peerUriArgKey = "--peer.uri="; + if (cmd.startsWith(peerUriArgKey)) { + try { + peerUri = new URI(cmd.substring(peerUriArgKey.length())); + } catch (URISyntaxException e) { + LOG.error("Malformed peer uri specified: {}", cmd, e); return false; } + continue; } - if (dstTableName == null && peerAddress == null) { - printUsage("At least a new table name or a peer address must be specified"); - return false; + + final String peerAdrArgKey = "--peer.adr="; + if (cmd.startsWith(peerAdrArgKey)) { + peerAddress = cmd.substring(peerAdrArgKey.length()); + continue; } - if ((endTime != 0) && (startTime > endTime)) { - printUsage("Invalid time range filter: starttime=" + startTime + " > endtime=" + endTime); - return false; + + final String familiesArgKey = "--families="; + if (cmd.startsWith(familiesArgKey)) { + families = cmd.substring(familiesArgKey.length()); + continue; } - if (bulkload && peerAddress != null) { - printUsage("Remote bulkload is not supported!"); - return false; + if (cmd.startsWith("--all.cells")) { + allCells = true; + continue; } - if (readingSnapshot && peerAddress != null) { - printUsage("Loading data from snapshot to remote peer cluster is not supported."); - return false; + if (cmd.startsWith("--bulkload")) { + bulkload = true; + continue; } - if (readingSnapshot && dstTableName == null) { - printUsage("The --new.name= for destination table should be " - + "provided when copying data from snapshot ."); - return false; + if (cmd.startsWith("--shuffle")) { + shuffle = true; + continue; } - if (readingSnapshot && snapshot == null) { - printUsage("Snapshot shouldn't be null when --snapshot is enabled."); - return false; + if (cmd.startsWith("--snapshot")) { + readingSnapshot = true; + continue; } - // set dstTableName if necessary - if (dstTableName == null) { - dstTableName = tableName; + if (i == args.length - 1) { + if (readingSnapshot) { + snapshot = cmd; + } else { + tableName = cmd; + } + } else { + printUsage("Invalid argument '" + cmd + "'"); + return false; } - } catch (Exception e) { - LOG.error("Failed to parse commandLine arguments", e); - printUsage("Can't start because " + e.getMessage()); + } + if (dstTableName == null && peerAddress == null) { + printUsage("At least a new table name or a peer address must be specified"); + return false; + } + if ((endTime != 0) && (startTime > endTime)) { + printUsage("Invalid time range filter: starttime=" + startTime + " > endtime=" + endTime); return false; } + + if (bulkload && (peerUri != null || peerAddress != null)) { + printUsage("Remote bulkload is not supported!"); + return false; + } + + if (readingSnapshot && (peerUri != null || peerAddress != null)) { + printUsage("Loading data from snapshot to remote peer cluster is not supported."); + return false; + } + + if (readingSnapshot && dstTableName == null) { + printUsage("The --new.name=
    for destination table should be " + + "provided when copying data from snapshot ."); + return false; + } + + if (readingSnapshot && snapshot == null) { + printUsage("Snapshot shouldn't be null when --snapshot is enabled."); + return false; + } + + // set dstTableName if necessary + if (dstTableName == null) { + dstTableName = tableName; + } return true; } @@ -401,7 +423,9 @@ public static void main(String[] args) throws Exception { @Override public int run(String[] args) throws Exception { Job job = createSubmittableJob(args); - if (job == null) return 1; + if (job == null) { + return 1; + } if (!job.waitForCompletion(true)) { LOG.info("Map-reduce job failed!"); if (bulkload) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index 146f4ec6511f..3b083b33dbdf 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -18,8 +18,11 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Collections; import java.util.Iterator; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; @@ -65,7 +68,17 @@ public class SyncTable extends Configured implements Tool { static final String SOURCE_HASH_DIR_CONF_KEY = "sync.table.source.hash.dir"; static final String SOURCE_TABLE_CONF_KEY = "sync.table.source.table.name"; static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name"; + static final String SOURCE_URI_CONF_KEY = "sync.table.source.uri"; + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0 Use {@link #SOURCE_URI_CONF_KEY} instead. + */ + @Deprecated static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster"; + static final String TARGET_URI_CONF_KEY = "sync.table.target.uri"; + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0 Use {@link #TARGET_URI_CONF_KEY} instead. + */ + @Deprecated static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster"; static final String DRY_RUN_CONF_KEY = "sync.table.dry.run"; static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes"; @@ -76,7 +89,17 @@ public class SyncTable extends Configured implements Tool { String sourceTableName; String targetTableName; + URI sourceUri; + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0 Use {@link #sourceUri} instead. + */ + @Deprecated String sourceZkCluster; + URI targetUri; + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0 Use {@link #targetUri} instead. 
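As a rough usage sketch for the new peer option in CopyTable, the snippet below drives the tool programmatically; the table names and the connection URI are placeholder values, not taken from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.CopyTable;
import org.apache.hadoop.util.ToolRunner;

public class CopyTableToPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to the command line form:
    //   hbase org.apache.hadoop.hbase.mapreduce.CopyTable \
    //     --peer.uri=hbase+rpc://peer-host:16020 --new.name=backup:t1 t1
    // When both are given, --peer.uri wins over the deprecated --peer.adr.
    int exit = ToolRunner.run(conf, new CopyTable(),
      new String[] { "--peer.uri=hbase+rpc://peer-host:16020", "--new.name=backup:t1", "t1" });
    System.exit(exit);
  }
}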
+ */ + @Deprecated String targetZkCluster; boolean dryRun; boolean doDeletes = true; @@ -89,9 +112,9 @@ public SyncTable(Configuration conf) { super(conf); } - private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { + private void initCredentialsForHBase(String clusterKey, Job job) throws IOException { Configuration peerConf = - HBaseConfiguration.createClusterConf(job.getConfiguration(), zookeeper); + HBaseConfiguration.createClusterConf(job.getConfiguration(), clusterKey); TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } @@ -142,11 +165,17 @@ public Job createSubmittableJob(String[] args) throws IOException { jobConf.set(SOURCE_HASH_DIR_CONF_KEY, sourceHashDir.toString()); jobConf.set(SOURCE_TABLE_CONF_KEY, sourceTableName); jobConf.set(TARGET_TABLE_CONF_KEY, targetTableName); - if (sourceZkCluster != null) { + if (sourceUri != null) { + jobConf.set(SOURCE_URI_CONF_KEY, sourceUri.toString()); + TableMapReduceUtil.initCredentialsForCluster(job, jobConf, sourceUri); + } else if (sourceZkCluster != null) { jobConf.set(SOURCE_ZK_CLUSTER_CONF_KEY, sourceZkCluster); initCredentialsForHBase(sourceZkCluster, job); } - if (targetZkCluster != null) { + if (targetUri != null) { + jobConf.set(TARGET_URI_CONF_KEY, targetUri.toString()); + TableMapReduceUtil.initCredentialsForCluster(job, jobConf, targetUri); + } else if (targetZkCluster != null) { jobConf.set(TARGET_ZK_CLUSTER_CONF_KEY, targetZkCluster); initCredentialsForHBase(targetZkCluster, job); } @@ -165,8 +194,11 @@ public Job createSubmittableJob(String[] args) throws IOException { } else { // No reducers. Just write straight to table. Call initTableReducerJob // because it sets up the TableOutputFormat. - TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster); - + if (targetUri != null) { + TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetUri); + } else { + TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null, targetZkCluster); + } // would be nice to add an option for bulk load instead } @@ -214,9 +246,10 @@ public static enum Counter { protected void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY)); - sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null); - targetConnection = - openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY, TableOutputFormat.OUTPUT_CONF_PREFIX); + sourceConnection = + openConnection(conf, SOURCE_URI_CONF_KEY, SOURCE_ZK_CLUSTER_CONF_KEY, null); + targetConnection = openConnection(conf, TARGET_URI_CONF_KEY, TARGET_ZK_CLUSTER_CONF_KEY, + TableOutputFormat.OUTPUT_CONF_PREFIX); sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY); targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY); dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false); @@ -241,12 +274,22 @@ protected void setup(Context context) throws IOException { targetHasher.ignoreTimestamps = ignoreTimestamp; } - private static Connection openConnection(Configuration conf, String zkClusterConfKey, - String configPrefix) throws IOException { - String zkCluster = conf.get(zkClusterConfKey); - Configuration clusterConf = - HBaseConfiguration.createClusterConf(conf, zkCluster, configPrefix); - return ConnectionFactory.createConnection(clusterConf); + private static Connection openConnection(Configuration conf, String uriConfKey, + String zkClusterConfKey, String configPrefix) throws IOException 
{ + String uri = conf.get(uriConfKey); + if (!StringUtils.isBlank(uri)) { + try { + return ConnectionFactory.createConnection(new URI(uri), conf); + } catch (URISyntaxException e) { + throw new IOException( + "malformed connection uri: " + uri + ", please check config " + uriConfKey, e); + } + } else { + String zkCluster = conf.get(zkClusterConfKey); + Configuration clusterConf = + HBaseConfiguration.createClusterConf(conf, zkCluster, configPrefix); + return ConnectionFactory.createConnection(clusterConf); + } } private static Table openTable(Connection connection, Configuration conf, @@ -747,10 +790,18 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Options:"); + System.err.println(" sourceuri Cluster connection uri of the source table"); + System.err.println(" (defaults to cluster in classpath's config)"); System.err.println(" sourcezkcluster ZK cluster key of the source table"); System.err.println(" (defaults to cluster in classpath's config)"); + System.err.println(" Do not take effect if sourceuri is specified"); + System.err.println(" Deprecated, please use sourceuri instead"); + System.err.println(" targeturi Cluster connection uri of the target table"); + System.err.println(" (defaults to cluster in classpath's config)"); System.err.println(" targetzkcluster ZK cluster key of the target table"); System.err.println(" (defaults to cluster in classpath's config)"); + System.err.println(" Do not take effect if targeturi is specified"); + System.err.println(" Deprecated, please use targeturi instead"); System.err.println(" dryrun if true, output counters but no writes"); System.err.println(" (defaults to false)"); System.err.println(" doDeletes if false, does not perform deletes"); @@ -792,6 +843,11 @@ private boolean doCommandLine(final String[] args) { printUsage(null); return false; } + final String sourceUriKey = "--sourceuri="; + if (cmd.startsWith(sourceUriKey)) { + sourceUri = new URI(cmd.substring(sourceUriKey.length())); + continue; + } final String sourceZkClusterKey = "--sourcezkcluster="; if (cmd.startsWith(sourceZkClusterKey)) { @@ -799,6 +855,12 @@ private boolean doCommandLine(final String[] args) { continue; } + final String targetUriKey = "--targeturi="; + if (cmd.startsWith(targetUriKey)) { + targetUri = new URI(cmd.substring(targetUriKey.length())); + continue; + } + final String targetZkClusterKey = "--targetzkcluster="; if (cmd.startsWith(targetZkClusterKey)) { targetZkCluster = cmd.substring(targetZkClusterKey.length()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index a23393ff804c..3179afd46829 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -20,6 +20,8 @@ import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.URLDecoder; import java.util.ArrayList; @@ -31,8 +33,10 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; @@ -41,6 +45,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistryFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; @@ -49,12 +54,13 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.IOExceptionRunnable; +import org.apache.hadoop.hbase.util.IOExceptionSupplier; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -442,6 +448,13 @@ public static void initTableMapperJob(List scans, Class connSupplier, User user, + Job job) throws IOException, InterruptedException { + try (Connection conn = connSupplier.get()) { + TokenUtil.addTokenForJob(conn, user, job); + } + } + public static void initCredentials(Job job) throws IOException { UserProvider userProvider = UserProvider.instantiate(job.getConfiguration()); if (userProvider.isHadoopSecurityEnabled()) { @@ -453,27 +466,32 @@ public static void initCredentials(Job job) throws IOException { } if (userProvider.isHBaseSecurityEnabled()) { + User user = userProvider.getCurrent(); try { // init credentials for remote cluster - String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS); - User user = userProvider.getCurrent(); - if (quorumAddress != null) { - Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); - Connection peerConn = ConnectionFactory.createConnection(peerConf); - try { - TokenUtil.addTokenForJob(peerConn, user, job); - } finally { - peerConn.close(); - } + String outputCluster = job.getConfiguration().get(TableOutputFormat.OUTPUT_CLUSTER); + if (!StringUtils.isBlank(outputCluster)) { + addTokenForJob(() -> { + URI uri; + try { + uri = new URI(outputCluster); + } catch (URISyntaxException e) { + throw new IOException("malformed connection uri: " + outputCluster + + ", please check config " + TableOutputFormat.OUTPUT_CLUSTER, e); + } + return ConnectionFactory.createConnection(uri, job.getConfiguration()); + }, user, job); } - - Connection conn = ConnectionFactory.createConnection(job.getConfiguration()); - try { - TokenUtil.addTokenForJob(conn, user, job); - } finally { - conn.close(); + String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS); + if (!StringUtils.isBlank(quorumAddress)) { + addTokenForJob(() -> { + Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); + return ConnectionFactory.createConnection(peerConf, user); + }, user, job); } + // init credentials for source cluster + addTokenForJob(() -> ConnectionFactory.createConnection(job.getConfiguration()), user, job); } catch (InterruptedException ie) { LOG.info("Interrupted obtaining user authentication token"); Thread.currentThread().interrupt(); @@ -489,15 +507,24 @@ public static void 
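The URI-based reducer setup introduced in this patch can be exercised roughly as below; the job name, destination table and the zookeeper-based connection URI are placeholders, and mapper setup is omitted for brevity:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class RemoteReducerJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "write-to-peer-cluster");
    // A non-null connection URI routes the output to that cluster via
    // TableOutputFormat.OUTPUT_CLUSTER; null keeps writing to the cluster the
    // job's own configuration points at.
    TableMapReduceUtil.initTableReducerJob("backup:dest", null, job, null,
      new URI("hbase+zk://zk-quorum-host:2181/hbase"));
    // With security enabled, initCredentials() also obtains a token for the
    // output cluster named by OUTPUT_CLUSTER.
    TableMapReduceUtil.initCredentials(job);
  }
}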
initCredentials(Job job) throws IOException { * @throws IOException When the authentication token cannot be obtained. */ public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException { + initCredentialsForCluster(job, conf, null); + } + + /** + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. + * @param job The job that requires the permission. + * @param conf The configuration to use in connecting to the peer cluster + * @param uri The connection uri for the given peer cluster + * @throws IOException When the authentication token cannot be obtained. + */ + public static void initCredentialsForCluster(Job job, Configuration conf, URI uri) + throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); if (userProvider.isHBaseSecurityEnabled()) { try { - Connection peerConn = ConnectionFactory.createConnection(conf); - try { - TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job); - } finally { - peerConn.close(); - } + addTokenForJob(() -> ConnectionFactory.createConnection(uri, conf), + userProvider.getCurrent(), job); } catch (InterruptedException e) { LOG.info("Interrupted obtaining user authentication token"); Thread.interrupted(); @@ -549,7 +576,7 @@ public static void initTableReducerJob(String table, Class reducer, Job job, Class partitioner) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, null); + initTableReducerJob(table, reducer, job, partitioner, (URI) null); } /** @@ -570,7 +597,11 @@ public static void initTableReducerJob(String table, Class such as server,server2,server3:2181:/hbase. * @throws IOException When determining the region count fails. + * @deprecated Since 3.0.0, will be removed in 4.0.0. Use + * {@link #initTableReducerJob(String, Class, Job, Class, URI)} instead, where we use + * the connection uri to specify the target cluster. */ + @Deprecated public static void initTableReducerJob(String table, Class reducer, Job job, Class partitioner, String quorumAddress) throws IOException { initTableReducerJob(table, reducer, job, partitioner, quorumAddress, true); @@ -596,23 +627,78 @@ public static void initTableReducerJob(String table, Class reducer, Job job, Class partitioner, String quorumAddress, boolean addDependencyJars) throws IOException { + initTableReducerJob(table, reducer, job, partitioner, () -> { + // If passed a quorum/ensemble address, pass it on to TableOutputFormat. + if (quorumAddress != null) { + // Calling this will validate the format + ZKConfig.validateClusterKey(quorumAddress); + job.getConfiguration().set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); + } + }, addDependencyJars); + } + + /** + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param outputCluster The HBase cluster you want to write to. Default is null which means output + * to the same cluster you read from, i.e, the cluster when initializing by + * the job's Configuration instance. + * @throws IOException When determining the region count fails. 
+ */ + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, URI outputCluster) throws IOException { + initTableReducerJob(table, reducer, job, partitioner, outputCluster, true); + } + + /** + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all + * necessary HBase configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param outputCluster The HBase cluster you want to write to. Default is null which means + * output to the same cluster you read from, i.e, the cluster when + * initializing by the job's Configuration instance. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @throws IOException When determining the region count fails. + */ + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, URI outputCluster, boolean addDependencyJars) throws IOException { + initTableReducerJob(table, reducer, job, partitioner, () -> { + if (outputCluster != null) { + ConnectionRegistryFactory.validate(outputCluster); + job.getConfiguration().set(TableOutputFormat.OUTPUT_CLUSTER, outputCluster.toString()); + } + }, addDependencyJars); + } + + private static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, IOExceptionRunnable setOutputCluster, boolean addDependencyJars) + throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); job.setOutputFormatClass(TableOutputFormat.class); - if (reducer != null) job.setReducerClass(reducer); + if (reducer != null) { + job.setReducerClass(reducer); + } conf.set(TableOutputFormat.OUTPUT_TABLE, table); conf.setStrings("io.serializations", conf.get("io.serializations"), MutationSerialization.class.getName(), ResultSerialization.class.getName()); - // If passed a quorum/ensemble address, pass it on to TableOutputFormat. - if (quorumAddress != null) { - // Calling this will validate the format - ZKConfig.validateClusterKey(quorumAddress); - conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); - } + setOutputCluster.run(); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Writable.class); if (partitioner == HRegionPartitioner.class) { @@ -853,9 +939,10 @@ public static void addDependencyJarsForClasses(Configuration conf, Class... 
c } jars.add(path.toString()); } - if (jars.isEmpty()) return; - - conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()]))); + if (jars.isEmpty()) { + return; + } + conf.set("tmpjars", jars.stream().collect(Collectors.joining(","))); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index a8ec67c9b237..e84d5234b1fc 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -53,13 +56,25 @@ public class TableOutputFormat extends OutputFormat implemen /** Job parameter that specifies the output table. */ public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; + /** + * Optional job parameter to specify a peer cluster. Used specifying remote cluster when copying + * between hbase clusters (the source is picked up from hbase-site.xml). + * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, + * Class, java.net.URI) + */ + public static final String OUTPUT_CLUSTER = "hbase.mapred.outputcluster"; + /** * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. For * keys matching this prefix, the prefix is stripped, and the value is set in the configuration * with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" would be set in the * configuration as "key1 = value1". Use this to set properties which should only be applied to * the {@code TableOutputFormat} configuration and not the input configuration. + * @deprecated Since 3.0.0, will be removed in 4.0.0. You do not need to use this way for + * specifying configurations any more, you can specify any configuration with the + * connection uri's queries specified by the {@link #OUTPUT_CLUSTER} parameter. */ + @Deprecated public static final String OUTPUT_CONF_PREFIX = "hbase.mapred.output."; /** @@ -67,10 +82,19 @@ public class TableOutputFormat extends OutputFormat implemen * between hbase clusters (the source is picked up from hbase-site.xml). * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, * Class, String) + * @deprecated Since 3.0.0, will be removed in 4.0.0. Use {@link #OUTPUT_CLUSTER} to specify the + * peer cluster instead. */ + @Deprecated public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; - /** Optional job parameter to specify peer cluster's ZK client port */ + /** + * Optional job parameter to specify peer cluster's ZK client port. + * @deprecated Since 3.0.0, will be removed in 4.0.0. You do not need to use this way for + * specifying configurations any more, you can specify any configuration with the + * connection uri's queries specified by the {@link #OUTPUT_CLUSTER} parameter. + */ + @Deprecated public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; /** @@ -91,6 +115,23 @@ public class TableOutputFormat extends OutputFormat implemen /** The configuration. 
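For jobs that wire up TableOutputFormat by hand instead of going through TableMapReduceUtil, a minimal sketch with the new key could look like this; the table name and the URI are placeholders, and the query-parameter remark restates the deprecation notes above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class OutputClusterKeySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "manual-output-format");
    job.setOutputFormatClass(TableOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "dest_table");
    // Replaces the deprecated QUORUM_ADDRESS / hbase.mapred.output.* overrides;
    // per-connection settings can be carried as query parameters on the URI.
    job.getConfiguration().set(TableOutputFormat.OUTPUT_CLUSTER,
      "hbase+rpc://peer-bootstrap:16020");
  }
}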
*/ private Configuration conf = null; + private static Connection createConnection(Configuration conf) throws IOException { + String outputCluster = conf.get(OUTPUT_CLUSTER); + if (!StringUtils.isBlank(outputCluster)) { + URI uri; + try { + uri = new URI(outputCluster); + } catch (URISyntaxException e) { + throw new IOException( + "malformed connection uri: " + outputCluster + ", please check config " + OUTPUT_CLUSTER, + e); + } + return ConnectionFactory.createConnection(uri, conf); + } else { + return ConnectionFactory.createConnection(conf); + } + } + /** * Writes the reducer output to an HBase table. */ @@ -99,13 +140,9 @@ protected class TableRecordWriter extends RecordWriter { private Connection connection; private BufferedMutator mutator; - /** - * - * - */ public TableRecordWriter() throws IOException { + this.connection = createConnection(conf); String tableName = conf.get(OUTPUT_TABLE); - this.connection = ConnectionFactory.createConnection(conf); this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName)); LOG.info("Created table instance for " + tableName); } @@ -175,8 +212,7 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted hConf = context.getConfiguration(); } - try (Connection connection = ConnectionFactory.createConnection(hConf); - Admin admin = connection.getAdmin()) { + try (Connection connection = createConnection(hConf); Admin admin = connection.getAdmin()) { TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE)); if (!admin.tableExists(tableName)) { throw new TableNotFoundException( diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index d83fa1d52522..36422b6e9f4a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.mapreduce.replication; import java.io.IOException; +import java.net.URI; import java.util.Arrays; import java.util.List; import java.util.UUID; @@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistryFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -60,6 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.mapreduce.InputSplit; @@ -210,13 +213,18 @@ public void map(ImmutableBytesWritable row, final Result value, Context context) final InputSplit tableSplit = context.getInputSplit(); - String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = - HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX); - + String peerQuorumAddress = conf.get(NAME + ".peerQuorumAddress"); + URI connectionUri = ConnectionRegistryFactory.tryParseAsConnectionURI(peerQuorumAddress); + Configuration peerConf; + if (connectionUri != null) { + peerConf = 
HBaseConfiguration.create(conf); + } else { + peerConf = + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + } String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString()); TableName peerTableName = TableName.valueOf(peerName); - replicatedConnection = ConnectionFactory.createConnection(peerConf); + replicatedConnection = ConnectionFactory.createConnection(connectionUri, peerConf); replicatedTable = replicatedConnection.getTable(peerTableName); scan.withStartRow(value.getRow()); @@ -408,10 +416,22 @@ public boolean isAborted() { } } + private Configuration applyURIConf(Configuration conf, URI uri) { + Configuration peerConf = HBaseConfiguration.subset(conf, PEER_CONFIG_PREFIX); + HBaseConfiguration.merge(peerConf, conf); + Strings.applyURIQueriesToConf(uri, peerConf); + return peerConf; + } + private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress) throws IOException { - Configuration peerConf = - HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + URI uri = ConnectionRegistryFactory.tryParseAsConnectionURI(peerQuorumAddress); + Configuration peerConf; + if (uri != null) { + peerConf = applyURIConf(conf, uri); + } else { + peerConf = HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + } FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress)); FileSystem fs = FileSystem.get(peerConf); @@ -526,16 +546,24 @@ public Job createSubmittableJob(Configuration conf, String[] args) throws IOExce TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job); } - Configuration peerClusterConf; + Configuration peerClusterBaseConf; if (peerId != null) { assert peerConfigPair != null; - peerClusterConf = peerConfigPair.getSecond(); + peerClusterBaseConf = peerConfigPair.getSecond(); + } else { + peerClusterBaseConf = conf; + } + Configuration peerClusterConf; + URI uri = ConnectionRegistryFactory.tryParseAsConnectionURI(peerQuorumAddress); + if (uri != null) { + peerClusterConf = new Configuration(peerClusterBaseConf); + applyURIConf(peerClusterConf, uri); } else { - peerClusterConf = - HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + peerClusterConf = HBaseConfiguration.createClusterConf(peerClusterBaseConf, peerQuorumAddress, + PEER_CONFIG_PREFIX); } // Obtain the auth token from peer cluster - TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); + TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf, uri); job.setOutputFormatClass(NullOutputFormat.class); job.setNumReduceTasks(0); @@ -775,6 +803,9 @@ public boolean doCommandLine(final String[] args) { } private boolean isPeerQuorumAddress(String cmd) { + if (ConnectionRegistryFactory.tryParseAsConnectionURI(cmd) != null) { + return true; + } try { ZKConfig.validateClusterKey(cmd); } catch (IOException e) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java similarity index 76% rename from hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java index f483e00c9177..d9219c9420f4 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java @@ -19,32 +19,23 @@ import static org.junit.Assert.assertFalse; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.ClassRule; import org.junit.Test; -import org.junit.experimental.categories.Category; /** * Test CopyTable between clusters */ -@Category({ MapReduceTests.class, LargeTests.class }) -public class TestCopyTableToPeerCluster extends CopyTableTestBase { +public abstract class CopyTableToPeerClusterTestBase extends CopyTableTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTableToPeerCluster.class); + protected static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); - private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); - - private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); + protected static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); @BeforeClass public static void beforeClass() throws Exception { @@ -80,7 +71,7 @@ protected void dropTargetTable(TableName tableName) throws Exception { @Override protected String[] getPeerClusterOptions() throws Exception { - return new String[] { "--peer.adr=" + UTIL2.getClusterKey() }; + return new String[] { "--peer.uri=" + UTIL2.getRpcConnnectionURI() }; } /** @@ -118,9 +109,9 @@ public void testBulkLoadNotSupported() throws Exception { TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { - assertFalse(runCopy(UTIL1.getConfiguration(), - new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", - "--peer.adr=" + UTIL2.getClusterKey(), tableName1.getNameAsString() })); + String[] args = ArrayUtils.addAll(getPeerClusterOptions(), + "--new.name=" + tableName2.getNameAsString(), "--bulkload", tableName1.getNameAsString()); + assertFalse(runCopy(UTIL1.getConfiguration(), args)); } finally { UTIL1.deleteTable(tableName1); UTIL2.deleteTable(tableName2); @@ -135,14 +126,13 @@ public void testSnapshotNotSupported() throws Exception { try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { UTIL1.getAdmin().snapshot(snapshot, tableName1); - assertFalse(runCopy(UTIL1.getConfiguration(), - new String[] { "--new.name=" + tableName2.getNameAsString(), "--snapshot", - "--peer.adr=" + UTIL2.getClusterKey(), snapshot })); + String[] args = ArrayUtils.addAll(getPeerClusterOptions(), + "--new.name=" + tableName2.getNameAsString(), "--snapshot", snapshot); + assertFalse(runCopy(UTIL1.getConfiguration(), args)); } finally { UTIL1.getAdmin().deleteSnapshot(snapshot); UTIL1.deleteTable(tableName1); UTIL2.deleteTable(tableName2); } - } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java new file mode 100644 index 000000000000..6ff9afda5357 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ MapReduceTests.class, LargeTests.class }) +public class TestCopyTableToPeerClusterWithClusterKey extends CopyTableToPeerClusterTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithClusterKey.class); + + @Override + protected String[] getPeerClusterOptions() throws Exception { + return new String[] { "--peer.adr=" + UTIL2.getClusterKey() }; + } + +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java new file mode 100644 index 000000000000..4e6293712ec2 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ MapReduceTests.class, LargeTests.class }) +public class TestCopyTableToPeerClusterWithRpcUri extends CopyTableToPeerClusterTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithRpcUri.class); + + @Override + protected String[] getPeerClusterOptions() throws Exception { + return new String[] { "--peer.uri=" + UTIL2.getZkConnectionURI() }; + } + +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java new file mode 100644 index 000000000000..720c367eb739 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ MapReduceTests.class, LargeTests.class }) +public class TestCopyTableToPeerClusterWithZkUri extends CopyTableToPeerClusterTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithZkUri.class); + + @Override + protected String[] getPeerClusterOptions() throws Exception { + return new String[] { "--peer.uri=" + UTIL2.getRpcConnnectionURI() }; + } + +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index d775f256ef12..2434df6adf51 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -121,11 +121,17 @@ public void testSyncTable() throws Exception { @Test public void testSyncTableToPeerCluster() throws Exception { - testSyncTable(UTIL1, UTIL2, "--sourcezkcluster=" + UTIL1.getClusterKey()); + testSyncTable(UTIL1, UTIL2, "--sourceuri=" + UTIL1.getRpcConnnectionURI()); } @Test public void testSyncTableFromSourceToPeerCluster() throws Exception { + testSyncTable(UTIL2, UTIL1, "--sourceuri=" + UTIL2.getRpcConnnectionURI(), + "--targeturi=" + UTIL1.getZkConnectionURI()); + } + + @Test + public void testSyncTableFromSourceToPeerClusterWithClusterKey() throws Exception { testSyncTable(UTIL2, UTIL1, "--sourcezkcluster=" + UTIL2.getClusterKey(), "--targetzkcluster=" + UTIL1.getClusterKey()); } @@ -185,7 +191,7 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { writeTestData(UTIL1, sourceTableName, UTIL2, targetTableName, current - 1000, current); hashSourceTable(UTIL1, sourceTableName, testDir, "--ignoreTimestamps=true"); Counters syncCounters = syncTables(UTIL2.getConfiguration(), sourceTableName, targetTableName, - testDir, "--ignoreTimestamps=true", "--sourcezkcluster=" + UTIL1.getClusterKey()); + testDir, "--ignoreTimestamps=true", "--sourceuri=" + UTIL1.getRpcConnnectionURI()); assertEqualTables(90, UTIL1, sourceTableName, UTIL2, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index 03cf6a441f4d..8c88d9bb4ee1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -24,6 +24,7 @@ import java.io.Closeable; import java.io.File; +import java.net.URI; import java.util.Collection; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -197,8 +198,8 @@ public void testInitCredentialsForCluster2() throws Exception { kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL); loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); - try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal); - Closeable util2Closeable = 
startSecureMiniCluster(util2, kdc, userPrincipal)) { + try (Closeable ignored1 = startSecureMiniCluster(util1, kdc, userPrincipal); + Closeable ignored2 = startSecureMiniCluster(util2, kdc, userPrincipal)) { Configuration conf1 = util1.getConfiguration(); Job job = Job.getInstance(conf1); @@ -231,7 +232,7 @@ public void testInitCredentialsForCluster3() throws Exception { kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL); loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); - try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal)) { + try (Closeable ignored1 = startSecureMiniCluster(util1, kdc, userPrincipal)) { HBaseTestingUtil util2 = new HBaseTestingUtil(); // Assume util2 is insecure cluster // Do not start util2 because cannot boot secured mini cluster and insecure mini cluster at @@ -267,7 +268,7 @@ public void testInitCredentialsForCluster4() throws Exception { kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL); loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); - try (Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) { + try (Closeable ignored2 = startSecureMiniCluster(util2, kdc, userPrincipal)) { Configuration conf1 = util1.getConfiguration(); Job job = Job.getInstance(conf1); @@ -287,4 +288,43 @@ public void testInitCredentialsForCluster4() throws Exception { kdc.stop(); } } + + @Test + @SuppressWarnings("unchecked") + public void testInitCredentialsForClusterUri() throws Exception { + HBaseTestingUtil util1 = new HBaseTestingUtil(); + HBaseTestingUtil util2 = new HBaseTestingUtil(); + + File keytab = new File(util1.getDataTestDir("keytab").toUri().getPath()); + MiniKdc kdc = util1.setupMiniKdc(keytab); + try { + String username = UserGroupInformation.getLoginUser().getShortUserName(); + String userPrincipal = username + "/localhost"; + kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL); + loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); + + try (Closeable ignored1 = startSecureMiniCluster(util1, kdc, userPrincipal); + Closeable ignored2 = startSecureMiniCluster(util2, kdc, userPrincipal)) { + Configuration conf1 = util1.getConfiguration(); + Job job = Job.getInstance(conf1); + + // use Configuration from util1 and URI from util2, to make sure that we use the URI instead + // of rely on the Configuration + TableMapReduceUtil.initCredentialsForCluster(job, util1.getConfiguration(), + new URI(util2.getRpcConnnectionURI())); + + Credentials credentials = job.getCredentials(); + Collection> tokens = credentials.getAllTokens(); + assertEquals(1, tokens.size()); + + String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher()); + Token tokenForCluster = + (Token) credentials.getToken(new Text(clusterId)); + assertEquals(userPrincipal + '@' + kdc.getRealm(), + tokenForCluster.decodeIdentifier().getUsername()); + } + } finally { + kdc.stop(); + } + } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index 7044b002a5eb..db7cead8c5db 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -62,8 +62,8 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * We moved some of {@link TestVerifyReplication}'s tests here because it could take too long to - * complete. In here we have miscellaneous. + * We moved some of {@link TestVerifyReplicationZkClusterKey}'s tests here because it could take too + * long to complete. In here we have miscellaneous. */ @Category({ ReplicationTests.class, LargeTests.class }) public class TestVerifyReplicationAdjunct extends TestReplicationBase { @@ -171,7 +171,7 @@ public void testHBase14905() throws Exception { assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size()); String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() }; - TestVerifyReplication.runVerifyReplication(args, 0, 1); + TestVerifyReplicationZkClusterKey.runVerifyReplication(args, 0, 1); } // VerifyReplication should honor versions option @@ -237,7 +237,7 @@ public void testVersionMismatchHBase14905() throws Exception { assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size()); String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() }; - TestVerifyReplication.runVerifyReplication(args, 0, 1); + TestVerifyReplicationZkClusterKey.runVerifyReplication(args, 0, 1); } finally { hbaseAdmin.enableReplicationPeer(PEER_ID); } @@ -254,7 +254,7 @@ public void testVerifyReplicationPrefixFiltering() throws Exception { waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4); String[] args = new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() }; - TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0); + TestVerifyReplicationZkClusterKey.runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0); } @Test @@ -317,9 +317,9 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() }; - TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); - TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 1); - TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 1); + TestVerifyReplicationZkClusterKey.runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); + TestVerifyReplicationZkClusterKey.checkRestoreTmpDir(CONF1, temPath1, 1); + TestVerifyReplicationZkClusterKey.checkRestoreTmpDir(CONF2, temPath2, 1); Scan scan = new Scan(); ResultScanner rs = htable2.getScanner(scan); @@ -347,9 +347,9 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() }; - TestVerifyReplication.runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); - TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 2); - TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 2); + TestVerifyReplicationZkClusterKey.runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); + TestVerifyReplicationZkClusterKey.checkRestoreTmpDir(CONF1, temPath1, 2); + TestVerifyReplicationZkClusterKey.checkRestoreTmpDir(CONF2, temPath2, 2); } @AfterClass diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java new file mode 100644 index 000000000000..3e603ec41ac8 --- /dev/null +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestVerifyReplicationRpcConnectionUri extends VerifyReplicationTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVerifyReplicationRpcConnectionUri.class); + + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + return util.getRpcConnnectionURI(); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java new file mode 100644 index 000000000000..718cba231ff4 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestVerifyReplicationZkClusterKey extends VerifyReplicationTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVerifyReplicationZkClusterKey.class); + + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + return util.getClusterKey(); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java new file mode 100644 index 000000000000..046d2d06664c --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestVerifyReplicationZkConnectionUri extends VerifyReplicationTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVerifyReplicationZkConnectionUri.class); + + @Override + protected String getClusterKey(HBaseTestingUtil util) throws Exception { + return util.getZkConnectionURI(); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java similarity index 94% rename from hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java index c7b0ed4c4b05..e263076677a5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java @@ -32,7 +32,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -52,8 +51,6 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -62,22 +59,16 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; -import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ReplicationTests.class, LargeTests.class }) -public class TestVerifyReplication extends TestReplicationBase { +public abstract class VerifyReplicationTestBase extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplication.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplication.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestVerifyReplicationZkClusterKey.class); private static final String PEER_ID = "2"; private static final TableName peerTableName = TableName.valueOf("peerTest"); @@ -86,14 +77,6 @@ public class TestVerifyReplication extends TestReplicationBase { @Rule public TestName name = new TestName(); - @Override - protected String getClusterKey(HBaseTestingUtil util) throws Exception { - // TODO: VerifyReplication does not support connection uri yet, so here we need to 
use cluster - // key, as in this test we will pass the cluster key config in peer config directly to - // VerifyReplication job. - return util.getClusterKey(); - } - @Before public void setUp() throws Exception { cleanUp(); @@ -268,7 +251,7 @@ public void testVerifyRepJobWithQuorumAddress() throws Exception { runSmallBatchTest(); // with a quorum address (a cluster key) - String[] args = new String[] { UTIL2.getClusterKey(), tableName.getNameAsString() }; + String[] args = new String[] { getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); Scan scan = new Scan(); @@ -313,7 +296,7 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); @@ -343,7 +326,7 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); @@ -385,7 +368,7 @@ public void testVerifyRepJobWithPeerTableName() throws Exception { // with a peerTableName along with quorum address (a cluster key) String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; + getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); UTIL2.deleteTableData(peerTableName); @@ -419,7 +402,7 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); @@ -450,7 +433,7 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), getClusterKey(UTIL2), tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); @@ -479,7 +462,7 @@ public void testVerifyReplicationThreadedRecompares() throws 
Exception { String[] args = new String[] { "--recompareThreads=10", "--recompareTries=3", "--recompareSleep=1", "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; + getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); assertEquals( counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); @@ -523,7 +506,7 @@ public void testFailsRemainingComparesAfterShutdown() throws Exception { */ String[] args = new String[] { "--recompareThreads=1", "--recompareTries=1", "--recompareSleep=121000", "--peerTableName=" + peerTableName.getNameAsString(), - UTIL2.getClusterKey(), tableName.getNameAsString() }; + getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); assertEquals( @@ -561,7 +544,7 @@ public void testVerifyReplicationSynchronousRecompares() throws Exception { htable1.put(put); String[] args = new String[] { "--recompareTries=3", "--recompareSleep=1", - "--peerTableName=" + peerTableName.getNameAsString(), UTIL2.getClusterKey(), + "--peerTableName=" + peerTableName.getNameAsString(), getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); assertEquals( From 0e8cfdb507723729a2cade50ea364149eec3466c Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 12 Jun 2024 10:33:02 -0700 Subject: [PATCH 411/514] HBASE-28637 asyncwal should attempt to recover lease if close fails (#5962) Signed-off-by: Duo Zhang --- .../hadoop/hbase/regionserver/wal/AbstractFSWAL.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 5f06b04cdf92..bba9bd534e9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -2022,13 +2023,22 @@ protected final void waitForSafePoint() { } } + private void recoverLease(FileSystem fs, Path p, Configuration conf) { + try { + RecoverLeaseFSUtils.recoverFileLease(fs, p, conf, null); + } catch (IOException ex) { + LOG.error("Unable to recover lease after several attempts. Give up.", ex); + } + } + protected final void closeWriter(W writer, Path path) { inflightWALClosures.put(path.getName(), writer); closeExecutor.execute(() -> { try { writer.close(); } catch (IOException e) { - LOG.warn("close old writer failed", e); + LOG.warn("close old writer failed.", e); + recoverLease(this.fs, path, conf); } finally { // call this even if the above close fails, as there is no other chance we can set closed to // true, it will not cause big problems. 
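To give a concrete picture of how the connection-URI plumbing introduced earlier in this series might be wired from a MapReduce job driver, a minimal sketch follows. It is illustrative only and not part of any patch: the driver class name, table name, peer host names, port and the query parameter are assumptions, while TableOutputFormat.OUTPUT_TABLE, TableOutputFormat.OUTPUT_CLUSTER and ConnectionFactory.createConnection(URI, Configuration) come from the TableOutputFormat changes above, and the hbase+rpc:// form mirrors the --peer.uri/--sourceuri/--targeturi values used by the tests above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class PeerClusterOutputSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Table on the peer cluster that the job should write to (name is made up).
    conf.set(TableOutputFormat.OUTPUT_TABLE, "copy_target");
    // Point the output format at the peer cluster via a connection URI instead of the
    // deprecated hbase.mapred.output.* overrides; additional client settings can be
    // carried as URI query parameters (hosts, port and the parameter are made up here).
    conf.set(TableOutputFormat.OUTPUT_CLUSTER,
      "hbase+rpc://peer-master-1:16000,peer-master-2:16000?hbase.client.retries.number=5");
    Job job = Job.getInstance(conf, "write-to-peer-cluster");
    job.setOutputFormatClass(TableOutputFormat.class);
    // ... set the mapper/reducer classes and the input format as usual, then submit the job.

    // The same URI string is accepted by the plain client API, which is what the private
    // TableOutputFormat.createConnection() added in the patch above delegates to.
    try (Connection connection = ConnectionFactory
      .createConnection(new URI(conf.get(TableOutputFormat.OUTPUT_CLUSTER)), conf)) {
      // use the peer-cluster connection directly if needed
    }
  }
}

Configuration keys appended as the URI query string apply only to the peer connection built from that URI, which is the replacement mechanism that the deprecation notes on OUTPUT_CONF_PREFIX and QUORUM_PORT above point to.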
From bd8ad45a259a1b0066068e41a25ef4c9098d374a Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Sat, 15 Jun 2024 22:15:20 -0400 Subject: [PATCH 412/514] HBASE--28666 Dropping unclosed WALTailingReaders leads to leaked sockets (#5994) In WALEntryStream, always use current WALTailingReader if one exists Signed-off-by: Duo Zhang --- .../hadoop/hbase/replication/regionserver/WALEntryStream.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 186d5b7c4d18..8d74d0e0399e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -219,6 +219,7 @@ private HasNext prepareReader() { // we will read from the beginning so we should always clear the compression context reader.resetTo(-1, true); } + return HasNext.YES; } catch (IOException e) { LOG.warn("Failed to reset reader {} to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression(), e); From 9ef621db90a77f55c187a037e6fc6eeb7bc0f69f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 16 Jun 2024 17:12:00 +0800 Subject: [PATCH 413/514] Revert "HBASE--28666 Dropping unclosed WALTailingReaders leads to leaked sockets (#5994)" This reverts commit bd8ad45a259a1b0066068e41a25ef4c9098d374a. --- .../hadoop/hbase/replication/regionserver/WALEntryStream.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 8d74d0e0399e..186d5b7c4d18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -219,7 +219,6 @@ private HasNext prepareReader() { // we will read from the beginning so we should always clear the compression context reader.resetTo(-1, true); } - return HasNext.YES; } catch (IOException e) { LOG.warn("Failed to reset reader {} to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression(), e); From 60c46118e21319651c099928ab929800d0de92b4 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Sat, 15 Jun 2024 22:15:20 -0400 Subject: [PATCH 414/514] HBASE-28666 Dropping unclosed WALTailingReaders leads to leaked sockets (#5994) In WALEntryStream, always use current WALTailingReader if one exists Signed-off-by: Duo Zhang --- .../hadoop/hbase/replication/regionserver/WALEntryStream.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 186d5b7c4d18..8d74d0e0399e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -219,6 +219,7 @@ private HasNext prepareReader() { // we will read from the beginning so we should always clear the compression context reader.resetTo(-1, true); } + return HasNext.YES; } catch (IOException e) { 
LOG.warn("Failed to reset reader {} to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression(), e); From b4f24cf8c6bc1bb675cca7d41962bf56a6f879cf Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 18 Jun 2024 16:30:58 +0800 Subject: [PATCH 415/514] HBASE-28617 Add trademark statement in footer on our website (#5993) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also change ™ to ® on the index page as HBase is a registered trademark Signed-off-by: Nick Dimiduk --- pom.xml | 34 ++++++++++++++++++++++++++++----- src/site/resources/css/site.css | 24 ++++++++++------------- src/site/site.xml | 25 ++++++++++++++++++++---- src/site/xdoc/index.xml | 22 ++++++++++----------- 4 files changed, 71 insertions(+), 34 deletions(-) diff --git a/pom.xml b/pom.xml index 7afa8b72c37c..59a1c95b9056 100644 --- a/pom.xml +++ b/pom.xml @@ -41,7 +41,7 @@ ${revision} pom Apache HBase - Apache HBase™ is the Hadoop database. Use it when you need + Apache HBase® is the Hadoop database. Use it when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware. @@ -2259,6 +2259,30 @@ false + + set-current-year + + timestamp-property + + pre-site + + current.year + yyyy + year + + + + set-current-date + + timestamp-property + + pre-site + + current.date + yyyy-MM-dd + day + + @@ -3028,7 +3052,7 @@ devapidocs Developer API The full HBase API, including private and unstable APIs - Apache HBase™ ${project.version} API + Apache HBase® ${project.version} API **/generated/* **/protobuf/* @@ -3079,7 +3103,7 @@ testdevapidocs Test Developer API The full HBase API test code, including private and unstable APIs - Apache HBase™ ${project.version} Test API + Apache HBase® ${project.version} Test API **/generated/* **/protobuf/* @@ -3139,7 +3163,7 @@ apidocs User API The HBase Application Programmer's API - Apache HBase™ ${project.version} API + Apache HBase® ${project.version} API org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress* false @@ -3197,7 +3221,7 @@ testapidocs Test User API The HBase Application Programmer's API test code - Apache HBase™ ${project.version} Test API + Apache HBase® ${project.version} Test API 
org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress* false diff --git a/src/site/resources/css/site.css b/src/site/resources/css/site.css index 3f42f5ab61d2..9426cd67a4f3 100644 --- a/src/site/resources/css/site.css +++ b/src/site/resources/css/site.css @@ -29,10 +29,9 @@ li { line-height: 120%; } -div#topbar, +header#topbar, div#banner, -div#breadcrumbs, -div#bodyColumn, +main#bodyColumn, footer { width: initial; padding-left: 20px; @@ -52,9 +51,7 @@ div#search-form.navbar-search.pull-right { margin-left: 0; position: initial; } -li#publishDate.pull-right { - list-style: none; -} + .container, .navbar-static-top .container, .navbar-fixed-top .container, @@ -105,14 +102,13 @@ li#publishDate.pull-right { } /* Override weird body padding thing that causes scrolling */ -@media (max-width: 767px) -body { +@media (max-width: 767px) { + body { padding-right: 0; padding-left: 0; -} - -@media (max-width: 767px) -.navbar-fixed-top, .navbar-fixed-bottom, .navbar-static-top { - margin-left: 0; - margin-right: 0; + } + .navbar-fixed-top, .navbar-fixed-bottom, .navbar-static-top { + margin-left: 0; + margin-right: 0; + } } \ No newline at end of file diff --git a/src/site/site.xml b/src/site/site.xml index c430faf2753b..7d05b7419c28 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -33,9 +33,9 @@ my end-user's GAV for ease of releasing this to maven central until the upstream update happens and is released. See HBASE-14785 and HBASE-21005 for more info. --> - com.github.joshelser + org.apache.maven.skins maven-fluido-skin - 1.7.1-HBase + 1.12.0 @@ -50,6 +50,11 @@ false true » + + apache/hbase + right + red + @@ -67,7 +72,7 @@ images/hbase_logo_with_orca_large.png http://hbase.apache.org/ - + @@ -152,5 +157,17 @@ - +
    + + Copyright ©2007–${current.year} The Apache Software Foundation. All rights reserved.
    + Apache HBase, HBase, Apache, the Apache HBase logo and the ASF logo are either registered trademarks or trademarks of the Apache Software Foundation. + All other marks mentioned may be trademarks or registered trademarks of their respective owners. + +
    + Last Published: ${current.date} +
    + ]]> +
    + diff --git a/src/site/xdoc/index.xml b/src/site/xdoc/index.xml index 67d962baed2a..b2dac50bcb92 100644 --- a/src/site/xdoc/index.xml +++ b/src/site/xdoc/index.xml @@ -21,21 +21,21 @@ under the License. xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd"> - Apache HBase™ Home + Apache HBase® Home -
    -

    Apache HBase™ is the Hadoop database, a distributed, scalable, big data store.

    -

    Use Apache HBase™ when you need random, realtime read/write access to your Big Data. +

    +

    Apache HBase® is the Hadoop database, a distributed, scalable, big data store.

    +

    Use Apache HBase® when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware. - Apache HBase is an open-source, distributed, versioned, non-relational database modeled after Google's Bigtable: A Distributed Storage System for Structured Data by Chang et al. - Just as Bigtable leverages the distributed data storage provided by the Google File System, Apache HBase provides Bigtable-like capabilities on top of Hadoop and HDFS. + Apache HBase® is an open-source, distributed, versioned, non-relational database modeled after Google's Bigtable: A Distributed Storage System for Structured Data by Chang et al. + Just as Bigtable leverages the distributed data storage provided by the Google File System, Apache HBase® provides Bigtable-like capabilities on top of Hadoop and HDFS.

    -

    Click here to download Apache HBase™.

    +

    Click here to download Apache HBase®.

    @@ -44,7 +44,7 @@ under the License.

  • Strictly consistent reads and writes.
  • Automatic and configurable sharding of tables
  • Automatic failover support between RegionServers.
  • -
  • Convenient base classes for backing Hadoop MapReduce jobs with Apache HBase tables.
  • +
  • Convenient base classes for backing Hadoop MapReduce jobs with Apache HBase® tables.
  • Easy to use Java API for client access.
  • Block cache and Bloom Filters for real-time queries.
  • Query predicate push down via server side Filters
  • @@ -55,20 +55,20 @@ under the License.

    -

    See the Architecture Overview, the Apache HBase Reference Guide FAQ, and the other documentation links.

    +

    See the Architecture Overview, the Apache HBase® Reference Guide FAQ, and the other documentation links.

    Export Control

    The HBase distribution includes cryptographic software. See the export control notice here

    Code Of Conduct

    We expect participants in discussions on the HBase project mailing lists, Slack and IRC channels, and JIRA issues to abide by the Apache Software Foundation's Code of Conduct. More information can be found here.

    License
    -

    Apache HBase is licensed under the Apache License, Version 2.0

    +

    Apache HBase® is licensed under the Apache License, Version 2.0

    Trademarks

    Apache HBase, HBase, Apache, the Apache feather logo, and the Apache HBase project logo are either registered trademarks or trademarks of The Apache Software Foundation in the United States and other countries.

    Thanks

    Thanks for all the sponsors, who are supporting Apache or supporting the HBase project!

    Security and Vulnerability information
    -

    See the Security chapter in the Apache HBase Reference Guide, and the general Apache Security information!

    +

    See the Security chapter in the Apache HBase® Reference Guide, and the general Apache Security information!

    From f2b7b77552ef1c4769a2c933c0f22f7cbfe2f237 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 18 Jun 2024 15:35:49 +0200 Subject: [PATCH 416/514] HBASE-28650 REST multiget endpoint returns 500 error if no rows are specified (#5999) Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/MultiRowResource.java | 20 ++++++++++--------- .../hbase/rest/TestMultiRowResource.java | 11 ++++++++++ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 8cce772472a8..84912f500d39 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -98,19 +98,21 @@ public Response get(final @Context UriInfo uriInfo, parsedParamFilter = pf.parseFilterString(filterBytes); } List rowSpecs = new ArrayList<>(); - for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { - RowSpec rowSpec = new RowSpec(rk, keyEncoding); + if (params.containsKey(ROW_KEYS_PARAM_NAME)) { + for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { + RowSpec rowSpec = new RowSpec(rk, keyEncoding); - if (this.versions != null) { - rowSpec.setMaxVersions(this.versions); - } + if (this.versions != null) { + rowSpec.setMaxVersions(this.versions); + } - if (this.columns != null) { - for (int i = 0; i < this.columns.length; i++) { - rowSpec.addColumn(Bytes.toBytes(this.columns[i])); + if (this.columns != null) { + for (int i = 0; i < this.columns.length; i++) { + rowSpec.addColumn(Bytes.toBytes(this.columns[i])); + } } + rowSpecs.add(rowSpec); } - rowSpecs.add(rowSpec); } MultiRowResultReader reader = new MultiRowResultReader(this.tableResource.getName(), rowSpecs, diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index bfe5846e0710..ee1daa3ab3f3 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -223,6 +223,17 @@ public void testMultiCellGetJSONB64() throws IOException { client.delete(row_6_url, extraHdr); } + @Test + public void testMultiCellGetNoKeys() throws IOException { + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget"); + + Response response = client.get(path.toString(), Constants.MIMETYPE_XML); + assertEquals(404, response.getCode()); + } + @Test public void testMultiCellGetXML() throws IOException { String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; From cd4c5c30b7e8f92d2a9170d4cfa156fdc2ebaa26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:35:31 +0800 Subject: [PATCH 417/514] HBASE-28673 Bump urllib3 in /dev-support/git-jira-release-audit (#5997) Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.18 to 1.26.19. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/1.26.19/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.18...1.26.19) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index 01328a1b49b4..e2a04baca778 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -35,5 +35,5 @@ requests-oauthlib==1.3.0 requests-toolbelt==0.9.1 six==1.14.0 smmap2==2.0.5 -urllib3==1.26.18 +urllib3==1.26.19 wcwidth==0.1.8 From 85bd8eef2c4ff190ce3c248979e61280e2b4d5d5 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 19 Jun 2024 10:02:43 +0200 Subject: [PATCH 418/514] HBASE-28662 Removing missing scanner via REST should return 404 (#5989) Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/ScannerInstanceResource.java | 10 ++++++++++ .../apache/hadoop/hbase/rest/TestScannerResource.java | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 951cafc8632a..28ba60fa19b7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -161,6 +161,11 @@ public Response getBinary(final @Context UriInfo uriInfo) { } servlet.getMetrics().incrementRequests(1); try { + if (generator == null) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); + } Cell value = generator.next(); if (value == null) { if (LOG.isTraceEnabled()) { @@ -199,6 +204,11 @@ public Response delete(final @Context UriInfo uriInfo) { return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) .entity("Forbidden" + CRLF).build(); } + if (generator == null) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); + } if (ScannerResource.delete(id)) { servlet.getMetrics().incrementSucessfulDeleteRequests(1); } else { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index 4e23c708ff1c..46be9a149601 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -393,4 +393,10 @@ public void testTableScanWithTableDisable() throws IOException { response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); assertEquals(410, response.getCode()); } + + @Test + public void deleteNonExistent() throws IOException { + Response response = client.delete("/" + TABLE + "/scanner/NONEXISTENT_SCAN"); + assertEquals(404, response.getCode()); + } } From 046e9d55409755285026ff5bba2797692af42840 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 19 Jun 2024 10:48:07 +0200 Subject: [PATCH 419/514] HBASE-28671 Add close method to REST client (#5998) also expose HttpClientConnectionManager Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/client/Client.java | 40 +++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 5c89dea48e74..74d3a11d3f2a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -63,8 +63,10 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.conn.HttpClientConnectionManager; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.cookie.BasicClientCookie; @@ -88,7 +90,7 @@ public class Client { private static final Logger LOG = LoggerFactory.getLogger(Client.class); - private HttpClient httpClient; + private CloseableHttpClient httpClient; private Cluster cluster; private Integer lastNodeId; private boolean sticky = false; @@ -115,7 +117,7 @@ public Client() { private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, boolean sticky, Optional trustStore, Optional userName, Optional password, - Optional bearerToken) { + Optional bearerToken, Optional connManager) { this.cluster = cluster; this.conf = conf; this.sslEnabled = sslEnabled; @@ -178,6 +180,8 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, extraHeaders.put(HttpHeaders.AUTHORIZATION, "Bearer " + bearerToken.get()); } + connManager.ifPresent(httpClientBuilder::setConnectionManager); + this.httpClient = httpClientBuilder.build(); setSticky(sticky); } @@ -201,7 +205,7 @@ public Client(Cluster cluster) { */ public Client(Cluster cluster, boolean sslEnabled) { initialize(cluster, HBaseConfiguration.create(), sslEnabled, false, Optional.empty(), - Optional.empty(), Optional.empty(), Optional.empty()); + Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); } /** @@ -214,7 +218,7 @@ public Client(Cluster cluster, boolean sslEnabled) { */ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { initialize(cluster, conf, sslEnabled, false, Optional.empty(), Optional.empty(), - Optional.empty(), Optional.empty()); + Optional.empty(), Optional.empty(), Optional.empty()); } /** @@ -238,7 +242,11 @@ public Client(Cluster cluster, String trustStorePath, Optional trustStor * or BEARER authentication in sticky mode, which does not use the old faulty load balancing * logic, and enables correct session handling. If neither userName/password, nor the bearer token * is specified, the client falls back to SPNEGO auth. The loadTrustsore static method can be used - * to load a local trustStore file. This is the preferred constructor to use. + * to load a local TrustStore file. If connManager is specified, it must be fully configured. Even + * then, the TrustStore related parameters must be specified because they are also used for SPNEGO + * authentication which uses a separate HTTP client implementation. Specifying the + * HttpClientConnectionManager is an experimental feature. It exposes the internal HTTP library + * details, and may be changed/removed when the library is updated or replaced. 
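(Illustration only, not part of the patch: a minimal, hypothetical sketch of how the connection-manager constructor described above and the close() method added further below might be used. The host, port and credentials are placeholders, and the Optional type parameters — KeyStore, String, String, String, HttpClientConnectionManager — are assumed from the surrounding javadoc.)

    // assumes org.apache.hadoop.hbase.rest.client.{Client, Cluster, Response},
    // org.apache.hadoop.hbase.rest.Constants and java.util.Optional are imported
    Cluster cluster = new Cluster();
    cluster.add("rest.example.com", 8080);          // hypothetical REST endpoint
    Configuration conf = HBaseConfiguration.create();
    Client client = new Client(cluster, conf, false,
      Optional.empty(),                             // no trust store (plain HTTP in this sketch)
      Optional.of("user"), Optional.of("secret"),   // BASIC credentials (placeholders)
      Optional.empty(),                             // no bearer token
      Optional.empty());                            // or Optional.of(customConnManager)
    try {
      Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
      // ... inspect response.getCode() / response.getBody() ...
    } finally {
      client.close();                               // releases the underlying CloseableHttpClient
    }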
* @param cluster the cluster definition * @param conf HBase/Hadoop configuration * @param sslEnabled use HTTPS @@ -247,10 +255,19 @@ public Client(Cluster cluster, String trustStorePath, Optional trustStor * @param password for BASIC auth * @param bearerToken for BEAERER auth */ + @InterfaceAudience.Private + public Client(Cluster cluster, Configuration conf, boolean sslEnabled, + Optional trustStore, Optional userName, Optional password, + Optional bearerToken, Optional connManager) { + initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken, + connManager); + } + public Client(Cluster cluster, Configuration conf, boolean sslEnabled, Optional trustStore, Optional userName, Optional password, Optional bearerToken) { - initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken); + initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken, + Optional.empty()); } /** @@ -269,7 +286,7 @@ public Client(Cluster cluster, Configuration conf, String trustStorePath, Optional trustStorePassword, Optional trustStoreType) { KeyStore trustStore = loadTruststore(trustStorePath, trustStorePassword, trustStoreType); initialize(cluster, conf, true, false, Optional.of(trustStore), Optional.empty(), - Optional.empty(), Optional.empty()); + Optional.empty(), Optional.empty(), Optional.empty()); } /** @@ -970,4 +987,13 @@ public ClientTrustStoreInitializationException(String message, Throwable cause) super(message, cause); } } + + public void close() { + try { + httpClient.close(); + } catch (Exception e) { + LOG.info("Exception while shutting down connection manager", e); + } + } + } From ff1e52bfce2d482d929fc740fffa181c4929b15f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 19 Jun 2024 19:06:29 +0800 Subject: [PATCH 420/514] HBASE-28617 Addendum fix broken links --- src/main/asciidoc/book.adoc | 2 +- src/site/site.xml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc index f02f5000c78c..90850d80b57e 100644 --- a/src/main/asciidoc/book.adoc +++ b/src/main/asciidoc/book.adoc @@ -19,7 +19,7 @@ */ //// -= Apache HBase (TM) Reference Guide += Apache HBase^®^ Reference Guide :Author: Apache HBase Team :Email: :doctype: book diff --git a/src/site/site.xml b/src/site/site.xml index 7d05b7419c28..3629656496f3 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -89,18 +89,18 @@ - - + + - + - - + + From bd9053ca4a8af1a1271b39602fc6fc57de36d76a Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Wed, 19 Jun 2024 14:38:18 +0100 Subject: [PATCH 421/514] HBASE-28596 Optimise BucketCache usage upon regions splits/merges. 
(#5906) Signed-off-by: Tak Lon (Stephen) Wu Reviewed0by: Duo Zhang --- .../hadoop/hbase/io/HalfStoreFileReader.java | 42 ++++++ .../hadoop/hbase/io/hfile/BlockCache.java | 11 ++ .../hadoop/hbase/io/hfile/BlockCacheUtil.java | 42 ++++++ .../hadoop/hbase/io/hfile/CacheConfig.java | 3 + .../hbase/io/hfile/CombinedBlockCache.java | 7 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 13 +- .../hbase/io/hfile/HFilePreadReader.java | 2 +- .../hbase/io/hfile/HFileReaderImpl.java | 43 +++--- .../hbase/io/hfile/bucket/BucketCache.java | 117 +++++++++++----- .../TransitRegionStateProcedure.java | 21 +-- .../hbase/regionserver/StoreFileReader.java | 2 +- .../handler/UnassignRegionHandler.java | 8 +- .../hadoop/hbase/TestSplitWithCache.java | 130 ++++++++++++++++++ .../hbase/io/TestHalfStoreFileReader.java | 37 +++-- .../hadoop/hbase/io/hfile/TestPrefetch.java | 8 -- .../io/hfile/TestPrefetchWithBucketCache.java | 70 +++++++++- .../bucket/TestBucketCachePersister.java | 6 + 17 files changed, 457 insertions(+), 105 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 0989f73df0a8..862fbc69809d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.IntConsumer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileInfo; +import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -64,6 +66,8 @@ public class HalfStoreFileReader extends StoreFileReader { private boolean firstKeySeeked = false; + private AtomicBoolean closed = new AtomicBoolean(false); + /** * Creates a half file reader for a hfile referred to by an hfilelink. * @param context Reader context info @@ -335,4 +339,42 @@ public long getFilterEntries() { // Estimate the number of entries as half the original file; this may be wildly inaccurate. return super.getFilterEntries() / 2; } + + /** + * Overrides close method to handle cache evictions for the referred file. If evictionOnClose is + * true, we will seek to the block containing the splitCell and evict all blocks from offset 0 up + * to that block offset if this is a bottom half reader, or the from the split block offset up to + * the end of the file if this is a top half reader. + * @param evictOnClose true if it should evict the file blocks from the cache. 
+ */ + @Override + public void close(boolean evictOnClose) throws IOException { + if (closed.compareAndSet(false, true)) { + if (evictOnClose) { + final HFileReaderImpl.HFileScannerImpl s = + (HFileReaderImpl.HFileScannerImpl) super.getScanner(false, true, false); + final String reference = this.reader.getHFileInfo().getHFileContext().getHFileName(); + final String referred = StoreFileInfo.getReferredToRegionAndFile(reference).getSecond(); + s.seekTo(splitCell); + if (s.getCurBlock() != null) { + long offset = s.getCurBlock().getOffset(); + LOG.trace("Seeking to split cell in reader: {} for file: {} top: {}, split offset: {}", + this, reference, top, offset); + ((HFileReaderImpl) reader).getCacheConf().getBlockCache().ifPresent(cache -> { + int numEvictedReferred = top + ? cache.evictBlocksRangeByHfileName(referred, offset, Long.MAX_VALUE) + : cache.evictBlocksRangeByHfileName(referred, 0, offset); + int numEvictedReference = cache.evictBlocksByHfileName(reference); + LOG.trace( + "Closing reference: {}; referred file: {}; was top? {}; evicted for referred: {};" + + "evicted for reference: {}", + reference, referred, top, numEvictedReferred, numEvictedReference); + }); + } + reader.close(false); + } else { + reader.close(evictOnClose); + } + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index bed0194b1fab..028a80075b5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -250,4 +250,15 @@ default Optional>> getFullyCachedFiles() { default Optional> getRegionCachedInfo() { return Optional.empty(); } + + /** + * Evict all blocks for the given file name between the passed offset values. + * @param hfileName The file for which blocks should be evicted. + * @param initOffset the initial offset for the range of blocks to be evicted. + * @param endOffset the end offset for the range of blocks to be evicted. + * @return number of blocks evicted. + */ + default int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) { + return 0; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index e6a4b609bc7d..3d4698b0047e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; +import static org.apache.hadoop.hbase.io.hfile.HFileBlock.FILL_HEADER; + import java.io.IOException; import java.nio.ByteBuffer; import java.util.NavigableMap; @@ -25,7 +27,9 @@ import java.util.concurrent.ConcurrentSkipListSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; +import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -244,6 +248,44 @@ public static int getMaxCachedBlocksByFile(Configuration conf) { return conf == null ? 
DEFAULT_MAX : conf.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); } + /** + * Similarly to HFileBlock.Writer.getBlockForCaching(), creates a HFileBlock instance without + * checksum for caching. This is needed for when we cache blocks via readers (either prefetch or + * client read), otherwise we may fail equality comparison when checking against same block that + * may already have been cached at write time. + * @param cacheConf the related CacheConfig object. + * @param block the HFileBlock instance to be converted. + * @return the resulting HFileBlock instance without checksum. + */ + public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock block) { + // Calculate how many bytes we need for checksum on the tail of the block. + int numBytes = cacheConf.shouldCacheCompressed(block.getBlockType().getCategory()) + ? 0 + : (int) ChecksumUtil.numBytes(block.getOnDiskDataSizeWithHeader(), + block.getHFileContext().getBytesPerChecksum()); + ByteBuff buff = block.getBufferReadOnly(); + HFileBlockBuilder builder = new HFileBlockBuilder(); + return builder.withBlockType(block.getBlockType()) + .withOnDiskSizeWithoutHeader(block.getOnDiskSizeWithoutHeader()) + .withUncompressedSizeWithoutHeader(block.getUncompressedSizeWithoutHeader()) + .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff) + .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1) + .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes) + .withHFileContext(cloneContext(block.getHFileContext())) + .withByteBuffAllocator(cacheConf.getByteBuffAllocator()).withShared(!buff.hasArray()).build(); + } + + public static HFileContext cloneContext(HFileContext context) { + HFileContext newContext = new HFileContextBuilder().withBlockSize(context.getBlocksize()) + .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data + .withCompression(context.getCompression()) + .withDataBlockEncoding(context.getDataBlockEncoding()) + .withHBaseCheckSum(context.isUseHBaseChecksum()).withCompressTags(context.isCompressTags()) + .withIncludesMvcc(context.isIncludesMvcc()).withIncludesTags(context.isIncludesTags()) + .withColumnFamily(context.getColumnFamily()).withTableName(context.getTableName()).build(); + return newContext; + } + /** * Use one of these to keep a running account of cached blocks by file. Throw it away when done. * This is different than metrics in that it is stats on current state of a cache. See diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 7fb1f1ec85bd..78f62bfc77ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -72,6 +72,8 @@ public class CacheConfig implements ConfigurationObserver { */ public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose"; + public static final String EVICT_BLOCKS_ON_SPLIT_KEY = "hbase.rs.evictblocksonsplit"; + /** * Configuration key to prefetch all blocks of a given file into the block cache when the file is * opened. 
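(Usage note, illustration only: the new evict-on-split switch is an ordinary configuration flag. A short sketch of toggling it; hbase.rs.evictblocksonsplit defaults to true, i.e. the parent region's blocks are evicted from the cache when the parent is closed for a split.)

    Configuration conf = HBaseConfiguration.create();
    // Keep the parent's cached blocks after a split so that the daughter regions,
    // which initially read through reference files, can still resolve those blocks
    // from the cache (see getBlockForReference in BucketCache further below).
    conf.setBoolean(CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY, false);

This mirrors what the new TestSplitWithCache test later in this patch does with UTIL.getConfiguration().setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit).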
@@ -113,6 +115,7 @@ public class CacheConfig implements ConfigurationObserver { public static final boolean DEFAULT_CACHE_INDEXES_ON_WRITE = false; public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; public static final boolean DEFAULT_EVICT_ON_CLOSE = false; + public static final boolean DEFAULT_EVICT_ON_SPLIT = true; public static final boolean DEFAULT_CACHE_DATA_COMPRESSED = false; public static final boolean DEFAULT_PREFETCH_ON_OPEN = false; public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index d6692d2e2bf1..06bf2a76f756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -109,8 +109,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea } } else { if (existInL1) { - LOG.warn("Cache key {} had block type {}, but was found in L1 cache.", cacheKey, - cacheKey.getBlockType()); updateBlockMetrics(block, cacheKey, l1Cache, caching); } else { updateBlockMetrics(block, cacheKey, l2Cache, caching); @@ -504,4 +502,9 @@ public Optional getBlockSize(BlockCacheKey key) { return l1Result.isPresent() ? l1Result : l2Cache.getBlockSize(key); } + @Override + public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) { + return l1Cache.evictBlocksRangeByHfileName(hfileName, initOffset, endOffset) + + l2Cache.evictBlocksRangeByHfileName(hfileName, initOffset, endOffset); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 47c20b691b4a..b24976707c33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -697,7 +697,7 @@ public boolean isUnpacked() { * when block is returned to the cache. * @return the offset of this block in the file it was read from */ - long getOffset() { + public long getOffset() { if (offset < 0) { throw new IllegalStateException("HFile block offset not initialized properly"); } @@ -1205,16 +1205,7 @@ void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { * being wholesome (ECC memory or if file-backed, it does checksumming). */ HFileBlock getBlockForCaching(CacheConfig cacheConf) { - HFileContext newContext = new HFileContextBuilder().withBlockSize(fileContext.getBlocksize()) - .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data - .withCompression(fileContext.getCompression()) - .withDataBlockEncoding(fileContext.getDataBlockEncoding()) - .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) - .withCompressTags(fileContext.isCompressTags()) - .withIncludesMvcc(fileContext.isIncludesMvcc()) - .withIncludesTags(fileContext.isIncludesTags()) - .withColumnFamily(fileContext.getColumnFamily()).withTableName(fileContext.getTableName()) - .build(); + HFileContext newContext = BlockCacheUtil.cloneContext(fileContext); // Build the HFileBlock. 
HFileBlockBuilder builder = new HFileBlockBuilder(); ByteBuff buff; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 6063ffe68891..e6b79cc55cca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -45,7 +45,7 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c }); // Prefetch file blocks upon open if requested - if (cacheConf.shouldPrefetchOnOpen() && cacheIfCompactionsOff() && shouldCache.booleanValue()) { + if (cacheConf.shouldPrefetchOnOpen() && shouldCache.booleanValue()) { PrefetchExecutor.request(path, new Runnable() { @Override public void run() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index e0585c6edaa2..c66a709fe494 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; -import static org.apache.hadoop.hbase.regionserver.CompactSplit.HBASE_REGION_SERVER_ENABLE_COMPACTION; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.BLOCK_CACHE_KEY_KEY; import io.opentelemetry.api.common.Attributes; @@ -42,14 +41,12 @@ import org.apache.hadoop.hbase.SizeCachedKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; -import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; @@ -159,6 +156,10 @@ public BlockIndexNotLoadedException(Path path) { } } + public CacheConfig getCacheConf() { + return cacheConf; + } + private Optional toStringFirstKey() { return getFirstKey().map(CellUtil::getCellKeyAsString); } @@ -307,7 +308,7 @@ public NotSeekedException(Path path) { } } - protected static class HFileScannerImpl implements HFileScanner { + public static class HFileScannerImpl implements HFileScanner { private ByteBuff blockBuffer; protected final boolean cacheBlocks; protected final boolean pread; @@ -331,6 +332,7 @@ protected static class HFileScannerImpl implements HFileScanner { * loaded yet. */ protected Cell nextIndexedKey; + // Current block being used. NOTICE: DON't release curBlock separately except in shipped() or // close() methods. Because the shipped() or close() will do the release finally, even if any // exception occur the curBlock will be released by the close() method (see @@ -340,6 +342,11 @@ protected static class HFileScannerImpl implements HFileScanner { // Whether we returned a result for curBlock's size in recordBlockSize(). // gets reset whenever curBlock is changed. 
private boolean providedCurrentBlockSize = false; + + public HFileBlock getCurBlock() { + return curBlock; + } + // Previous blocks that were used in the course of the read protected final ArrayList prevBlocks = new ArrayList<>(); @@ -1283,8 +1290,6 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo new BlockCacheKey(path, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType); Attributes attributes = Attributes.of(BLOCK_CACHE_KEY_KEY, cacheKey.toString()); - boolean cacheable = cacheBlock && cacheIfCompactionsOff(); - boolean useLock = false; IdLock.Entry lockEntry = null; final Span span = Span.current(); @@ -1326,7 +1331,7 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo return cachedBlock; } - if (!useLock && cacheable && cacheConf.shouldLockOnCacheMiss(expectedBlockType)) { + if (!useLock && cacheBlock && cacheConf.shouldLockOnCacheMiss(expectedBlockType)) { // check cache again with lock useLock = true; continue; @@ -1337,7 +1342,7 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo span.addEvent("block cache miss", attributes); // Load block from filesystem. HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, - !isCompaction, shouldUseHeap(expectedBlockType, cacheable)); + !isCompaction, shouldUseHeap(expectedBlockType, cacheBlock)); try { validateBlockType(hfileBlock, expectedBlockType); } catch (IOException e) { @@ -1350,25 +1355,30 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo // Don't need the unpacked block back and we're storing the block in the cache compressed if (cacheOnly && cacheCompressed && cacheOnRead) { + HFileBlock blockNoChecksum = BlockCacheUtil.getBlockForCaching(cacheConf, hfileBlock); cacheConf.getBlockCache().ifPresent(cache -> { LOG.debug("Skipping decompression of block {} in prefetch", cacheKey); // Cache the block if necessary - if (cacheable && cacheConf.shouldCacheBlockOnRead(category)) { - cache.cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory(), cacheOnly); + if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) { + cache.cacheBlock(cacheKey, blockNoChecksum, cacheConf.isInMemory(), cacheOnly); } }); if (updateCacheMetrics && hfileBlock.getBlockType().isData()) { HFile.DATABLOCK_READ_COUNT.increment(); } - return hfileBlock; + return blockNoChecksum; } HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader); + HFileBlock unpackedNoChecksum = BlockCacheUtil.getBlockForCaching(cacheConf, unpacked); // Cache the block if necessary cacheConf.getBlockCache().ifPresent(cache -> { - if (cacheable && cacheConf.shouldCacheBlockOnRead(category)) { + if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) { // Using the wait on cache during compaction and prefetching. - cache.cacheBlock(cacheKey, cacheCompressed ? hfileBlock : unpacked, + cache.cacheBlock(cacheKey, + cacheCompressed + ? 
BlockCacheUtil.getBlockForCaching(cacheConf, hfileBlock) + : unpackedNoChecksum, cacheConf.isInMemory(), cacheOnly); } }); @@ -1380,7 +1390,7 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo HFile.DATABLOCK_READ_COUNT.increment(); } - return unpacked; + return unpackedNoChecksum; } } finally { if (lockEntry != null) { @@ -1691,9 +1701,4 @@ public int getMajorVersion() { public void unbufferStream() { fsBlockReader.unbufferStream(); } - - protected boolean cacheIfCompactionsOff() { - return (!StoreFileInfo.isReference(name) && !HFileLink.isHFileLink(name)) - || !conf.getBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, true); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 71bfc757e51e..7ee7a03ba647 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.IdReadWriteLock; @@ -215,6 +216,8 @@ public class BucketCache implements BlockCache, HeapSize { // reset after a successful read/write. private volatile long ioErrorStartTime = -1; + private Configuration conf; + /** * A ReentrantReadWriteLock to lock on a particular block identified by offset. The purpose of * this is to avoid freeing the block which is being read. @@ -291,6 +294,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck } else { this.offsetLock = new IdReadWriteLockWithObjectPool<>(ReferenceType.SOFT); } + this.conf = conf; this.algorithm = conf.get(FILE_VERIFY_ALGORITHM, DEFAULT_FILE_VERIFY_ALGORITHM); this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath); this.writerThreads = new WriterThread[writerThreadNum]; @@ -560,6 +564,30 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach } } + /** + * If the passed cache key relates to a reference (.), this method looks + * for the block from the referred file, in the cache. If present in the cache, the block for the + * referred file is returned, otherwise, this method returns null. It will also return null if the + * passed cache key doesn't relate to a reference. + * @param key the BlockCacheKey instance to look for in the cache. + * @return the cached block from the referred file, null if there's no such block in the cache or + * the passed key doesn't relate to a reference. + */ + public BucketEntry getBlockForReference(BlockCacheKey key) { + BucketEntry foundEntry = null; + String referredFileName = null; + if (StoreFileInfo.isReference(key.getHfileName())) { + referredFileName = StoreFileInfo.getReferredToRegionAndFile(key.getHfileName()).getSecond(); + } + if (referredFileName != null) { + BlockCacheKey convertedCacheKey = new BlockCacheKey(referredFileName, key.getOffset()); + foundEntry = backingMap.get(convertedCacheKey); + LOG.debug("Got a link/ref: {}. Related cacheKey: {}. 
Found entry: {}", key.getHfileName(), + convertedCacheKey, foundEntry); + } + return foundEntry; + } + /** * Get the buffer of the block with the specified key. * @param key block's cache key @@ -583,6 +611,9 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, return re.getData(); } BucketEntry bucketEntry = backingMap.get(key); + if (bucketEntry == null) { + bucketEntry = getBlockForReference(key); + } if (bucketEntry != null) { long start = System.nanoTime(); ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset()); @@ -591,7 +622,9 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, // We can not read here even if backingMap does contain the given key because its offset // maybe changed. If we lock BlockCacheKey instead of offset, then we can only check // existence here. - if (bucketEntry.equals(backingMap.get(key))) { + if ( + bucketEntry.equals(backingMap.get(key)) || bucketEntry.equals(getBlockForReference(key)) + ) { // Read the block from IOEngine based on the bucketEntry's offset and length, NOTICE: the // block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to // the same BucketEntry, then all of the three will share the same refCnt. @@ -1658,8 +1691,15 @@ protected String getAlgorithm() { */ @Override public int evictBlocksByHfileName(String hfileName) { + return evictBlocksRangeByHfileName(hfileName, 0, Long.MAX_VALUE); + } + + @Override + public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) { fileNotFullyCached(hfileName); - Set keySet = getAllCacheKeysForFile(hfileName); + Set keySet = getAllCacheKeysForFile(hfileName, initOffset, endOffset); + LOG.debug("found {} blocks for file {}, starting offset: {}, end offset: {}", keySet.size(), + hfileName, initOffset, endOffset); int numEvicted = 0; for (BlockCacheKey key : keySet) { if (evictBlock(key)) { @@ -1669,9 +1709,9 @@ public int evictBlocksByHfileName(String hfileName) { return numEvicted; } - private Set getAllCacheKeysForFile(String hfileName) { - return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, - new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + private Set getAllCacheKeysForFile(String hfileName, long init, long end) { + return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true, + new BlockCacheKey(hfileName, end), true); } /** @@ -2081,25 +2121,20 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d try { final MutableInt count = new MutableInt(); LOG.debug("iterating over {} entries in the backing map", backingMap.size()); - backingMap.entrySet().stream().forEach(entry -> { - if ( - entry.getKey().getHfileName().equals(fileName.getName()) - && entry.getKey().getBlockType().equals(BlockType.DATA) - ) { - long offsetToLock = entry.getValue().offset(); - LOG.debug("found block {} in the backing map. 
Acquiring read lock for offset {}", - entry.getKey(), offsetToLock); - ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock); - lock.readLock().lock(); - locks.add(lock); - // rechecks the given key is still there (no eviction happened before the lock acquired) - if (backingMap.containsKey(entry.getKey())) { - count.increment(); - } else { - lock.readLock().unlock(); - locks.remove(lock); - LOG.debug("found block {}, but when locked and tried to count, it was gone."); - } + Set result = getAllCacheKeysForFile(fileName.getName(), 0, Long.MAX_VALUE); + if (result.isEmpty() && StoreFileInfo.isReference(fileName)) { + result = getAllCacheKeysForFile( + StoreFileInfo.getReferredToRegionAndFile(fileName.getName()).getSecond(), 0, + Long.MAX_VALUE); + } + result.stream().forEach(entry -> { + LOG.debug("found block for file {} in the backing map. Acquiring read lock for offset {}", + fileName.getName(), entry.getOffset()); + ReentrantReadWriteLock lock = offsetLock.getLock(entry.getOffset()); + lock.readLock().lock(); + locks.add(lock); + if (backingMap.containsKey(entry) && entry.getBlockType() == BlockType.DATA) { + count.increment(); } }); int metaCount = totalBlockCount - dataBlockCount; @@ -2121,17 +2156,19 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d + "and try the verification again.", fileName); Thread.sleep(100); notifyFileCachingCompleted(fileName, totalBlockCount, dataBlockCount, size); - } else - if ((getAllCacheKeysForFile(fileName.getName()).size() - metaCount) == dataBlockCount) { - LOG.debug("We counted {} data blocks, expected was {}, there was no more pending in " - + "the cache write queue but we now found that total cached blocks for file {} " - + "is equal to data block count.", count, dataBlockCount, fileName.getName()); - fileCacheCompleted(fileName, size); - } else { - LOG.info("We found only {} data blocks cached from a total of {} for file {}, " - + "but no blocks pending caching. Maybe cache is full or evictions " - + "happened concurrently to cache prefetch.", count, dataBlockCount, fileName); - } + } else if ( + (getAllCacheKeysForFile(fileName.getName(), 0, Long.MAX_VALUE).size() - metaCount) + == dataBlockCount + ) { + LOG.debug("We counted {} data blocks, expected was {}, there was no more pending in " + + "the cache write queue but we now found that total cached blocks for file {} " + + "is equal to data block count.", count, dataBlockCount, fileName.getName()); + fileCacheCompleted(fileName, size); + } else { + LOG.info("We found only {} data blocks cached from a total of {} for file {}, " + + "but no blocks pending caching. Maybe cache is full or evictions " + + "happened concurrently to cache prefetch.", count, dataBlockCount, fileName); + } } } catch (InterruptedException e) { throw new RuntimeException(e); @@ -2157,14 +2194,20 @@ public Optional shouldCacheFile(String fileName) { @Override public Optional isAlreadyCached(BlockCacheKey key) { - return Optional.of(getBackingMap().containsKey(key)); + boolean foundKey = backingMap.containsKey(key); + // if there's no entry for the key itself, we need to check if this key is for a reference, + // and if so, look for a block from the referenced file using this getBlockForReference method. + return Optional.of(foundKey ? 
true : getBlockForReference(key) != null); } @Override public Optional getBlockSize(BlockCacheKey key) { BucketEntry entry = backingMap.get(key); if (entry == null) { - return Optional.empty(); + // the key might be for a reference tha we had found the block from the referenced file in + // the cache when we first tried to cache it. + entry = getBlockForReference(key); + return entry == null ? Optional.empty() : Optional.of(entry.getOnDiskSizeWithHeader()); } else { return Optional.of(entry.getOnDiskSizeWithHeader()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index 8dfc08a5de89..0ed740c7853e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.master.assignment; import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_CLOSE; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_SPLIT; import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY; import static org.apache.hadoop.hbase.master.LoadBalancer.BOGUS_SERVER_NAME; import static org.apache.hadoop.hbase.master.assignment.AssignmentManager.FORCE_REGION_RETAINMENT; @@ -369,12 +371,15 @@ private Flow confirmOpened(MasterProcedureEnv env, RegionStateNode regionNode) } } - private void closeRegionAfterUpdatingMeta(RegionStateNode regionNode) { - CloseRegionProcedure closeProc = isSplit - ? new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), assignCandidate, - true) - : new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), assignCandidate, - evictCache); + private void closeRegionAfterUpdatingMeta(MasterProcedureEnv env, RegionStateNode regionNode) { + CloseRegionProcedure closeProc = + isSplit + ? 
new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), + assignCandidate, + env.getMasterConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, + DEFAULT_EVICT_ON_SPLIT)) + : new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(), + assignCandidate, evictCache); addChildProcedure(closeProc); setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_CLOSED); } @@ -383,7 +388,7 @@ private void closeRegion(MasterProcedureEnv env, RegionStateNode regionNode) throws IOException, ProcedureSuspendedException { if ( ProcedureFutureUtil.checkFuture(this, this::getFuture, this::setFuture, - () -> closeRegionAfterUpdatingMeta(regionNode)) + () -> closeRegionAfterUpdatingMeta(env, regionNode)) ) { return; } @@ -391,7 +396,7 @@ private void closeRegion(MasterProcedureEnv env, RegionStateNode regionNode) // this is the normal case ProcedureFutureUtil.suspendIfNecessary(this, this::setFuture, env.getAssignmentManager().regionClosing(regionNode), env, - () -> closeRegionAfterUpdatingMeta(regionNode)); + () -> closeRegionAfterUpdatingMeta(env, regionNode)); } else { forceNewPlan = true; regionNode.setRegionLocation(null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 4f872d7084e1..7751df300e19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -68,7 +68,7 @@ public class StoreFileReader { protected BloomFilter deleteFamilyBloomFilter = null; private BloomFilterMetrics bloomFilterMetrics = null; protected BloomType bloomFilterType; - private final HFile.Reader reader; + protected final HFile.Reader reader; protected long sequenceID = -1; protected TimeRange timeRange = null; private byte[] lastBloomKey; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 217f2ebbd45a..a360759aea15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -124,11 +124,9 @@ public void process() throws IOException { region.getCoprocessorHost().preClose(abort); } // This should be true only in the case of splits/merges closing the parent regions, as - // there's no point on keep blocks for those region files. As hbase.rs.evictblocksonclose is - // false by default we don't bother overriding it if evictCache is false. - if (evictCache) { - region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(true)); - } + // there's no point on keep blocks for those region files. + region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(evictCache)); + if (region.close(abort) == null) { // XXX: Is this still possible? The old comment says about split, but now split is done at // master side, so... 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java new file mode 100644 index 000000000000..c308d5f6d832 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY; +import static org.apache.hadoop.hbase.io.hfile.CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MiscTests.class, MediumTests.class }) +public class TestSplitWithCache { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSplitWithCache.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestSplitWithCache.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + @BeforeClass + public static void setUp() throws Exception { + UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, 1000); + UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + UTIL.getConfiguration().setBoolean(CACHE_BLOCKS_ON_WRITE_KEY, true); + UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true); + UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, "offheap"); + UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200); + } + + @Test + public void testEvictOnSplit() throws Exception { + doTest("testEvictOnSplit", true, + (f, m) -> 
Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null), + (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null)); + } + + @Test + public void testDoesntEvictOnSplit() throws Exception { + doTest("testDoesntEvictOnSplit", false, + (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null), + (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null)); + } + + private void doTest(String table, boolean evictOnSplit, + BiConsumer>> predicateBeforeSplit, + BiConsumer>> predicateAfterSplit) throws Exception { + UTIL.getConfiguration().setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit); + UTIL.startMiniCluster(1); + try { + TableName tableName = TableName.valueOf(table); + byte[] family = Bytes.toBytes("CF"); + TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + UTIL.getAdmin().createTable(td); + UTIL.waitTableAvailable(tableName); + Table tbl = UTIL.getConnection().getTable(tableName); + List puts = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + Put p = new Put(Bytes.toBytes("row-" + i)); + p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i)); + puts.add(p); + } + tbl.put(puts); + UTIL.getAdmin().flush(tableName); + Collection files = + UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles(); + checkCacheForBlocks(tableName, files, predicateBeforeSplit); + UTIL.getAdmin().split(tableName, Bytes.toBytes("row-500")); + Waiter.waitFor(UTIL.getConfiguration(), 30000, + () -> UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2); + UTIL.waitUntilNoRegionsInTransition(); + checkCacheForBlocks(tableName, files, predicateAfterSplit); + } finally { + UTIL.shutdownMiniCluster(); + } + + } + + private void checkCacheForBlocks(TableName tableName, Collection files, + BiConsumer>> checker) { + files.forEach(f -> { + UTIL.getMiniHBaseCluster().getRegionServer(0).getBlockCache().ifPresent(cache -> { + cache.getFullyCachedFiles().ifPresent(m -> { + checker.accept(f.getPath().getName(), m); + }); + assertTrue(cache.getFullyCachedFiles().isPresent()); + }); + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 0a41159e3aaa..3a285a21f404 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -82,15 +84,19 @@ public static void tearDownAfterClass() throws Exception { */ @Test public void testHalfScanAndReseek() throws IOException { - String root_dir = TEST_UTIL.getDataTestDir().toString(); - Path p = new Path(root_dir, "test"); - 
Configuration conf = TEST_UTIL.getConfiguration(); FileSystem fs = FileSystem.get(conf); + String root_dir = TEST_UTIL.getDataTestDir().toString(); + Path parentPath = new Path(new Path(root_dir, "parent"), "CF"); + fs.mkdirs(parentPath); + Path splitAPath = new Path(new Path(root_dir, "splita"), "CF"); + Path splitBPath = new Path(new Path(root_dir, "splitb"), "CF"); + Path filePath = StoreFileWriter.getUniqueFile(fs, parentPath); + CacheConfig cacheConf = new CacheConfig(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build(); HFile.Writer w = - HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta).create(); + HFile.getWriterFactory(conf, cacheConf).withPath(fs, filePath).withFileContext(meta).create(); // write some things. List items = genSomeKeys(); @@ -99,26 +105,35 @@ public void testHalfScanAndReseek() throws IOException { } w.close(); - HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf); + HFile.Reader r = HFile.createReader(fs, filePath, cacheConf, true, conf); Cell midKV = r.midKey().get(); byte[] midkey = CellUtil.cloneRow(midKV); - // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey)); + Path splitFileA = new Path(splitAPath, filePath.getName() + ".parent"); + Path splitFileB = new Path(splitBPath, filePath.getName() + ".parent"); Reference bottom = new Reference(midkey, Reference.Range.bottom); - doTestOfScanAndReseek(p, fs, bottom, cacheConf); + bottom.write(fs, splitFileA); + doTestOfScanAndReseek(splitFileA, fs, bottom, cacheConf); Reference top = new Reference(midkey, Reference.Range.top); - doTestOfScanAndReseek(p, fs, top, cacheConf); + top.write(fs, splitFileB); + doTestOfScanAndReseek(splitFileB, fs, top, cacheConf); r.close(); } private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, CacheConfig cacheConf) throws IOException { - ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build(); - StoreFileInfo storeFileInfo = - new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, fs.getFileStatus(p), bottom); + Path referencePath = StoreFileInfo.getReferredToFile(p); + FSDataInputStreamWrapper in = new FSDataInputStreamWrapper(fs, referencePath, false, 0); + FileStatus status = fs.getFileStatus(referencePath); + long length = status.getLen(); + ReaderContextBuilder contextBuilder = + new ReaderContextBuilder().withInputStreamWrapper(in).withFileSize(length) + .withReaderType(ReaderContext.ReaderType.PREAD).withFileSystem(fs).withFilePath(p); + ReaderContext context = contextBuilder.build(); + StoreFileInfo storeFileInfo = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p, true); storeFileInfo.initHFileInfo(context); final HalfStoreFileReader halfreader = (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 6083d872c826..b172202c8d4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -323,14 +323,6 @@ public void testPrefetchCompressed() throws Exception { conf.setBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, false); } - @Test - public void testPrefetchSkipsRefs() throws Exception { - testPrefetchWhenRefs(true, c -> { - boolean isCached = c != null; - assertFalse(isCached); - }); - } - @Test public void 
testPrefetchDoesntSkipRefs() throws Exception { testPrefetchWhenRefs(false, c -> { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index 2d0a85962ef9..581d1893c17d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -39,13 +40,20 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -135,6 +143,55 @@ public void testPrefetchDoesntOverwork() throws Exception { assertTrue(snapshot.get(key).getCachedTime() < bc.getBackingMap().get(key).getCachedTime()); } + @Test + public void testPrefetchRefsAfterSplit() throws Exception { + conf.setLong(BUCKET_CACHE_SIZE_KEY, 200); + blockCache = BlockCacheFactory.createBlockCache(conf); + cacheConf = new CacheConfig(conf, blockCache); + + Path tableDir = new Path(TEST_UTIL.getDataTestDir(), "testPrefetchRefsAfterSplit"); + RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build(); + Path regionDir = new Path(tableDir, region.getEncodedName()); + Path cfDir = new Path(regionDir, "cf"); + HRegionFileSystem regionFS = + HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, region); + Path storeFile = writeStoreFile(100, cfDir); + + // Prefetches the file blocks + LOG.debug("First read should prefetch the blocks."); + readStoreFile(storeFile); + BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get(); + // Our file should have 6 DATA blocks. 
We should wait for all of them to be cached + Waiter.waitFor(conf, 300, () -> bc.getBackingMap().size() == 6); + + // split the file and return references to the original file + Random rand = ThreadLocalRandom.current(); + byte[] splitPoint = RandomKeyValueUtil.randomOrderedKey(rand, 50); + HStoreFile file = new HStoreFile(fs, storeFile, conf, cacheConf, BloomType.NONE, true); + Path ref = regionFS.splitStoreFile(region, "cf", file, splitPoint, false, + new ConstantSizeRegionSplitPolicy()); + HStoreFile refHsf = new HStoreFile(this.fs, ref, conf, cacheConf, BloomType.NONE, true); + // starts reader for the ref. The ref should resolve to the original file blocks + // and not duplicate blocks in the cache. + refHsf.initReader(); + HFile.Reader reader = refHsf.getReader().getHFileReader(); + while (!reader.prefetchComplete()) { + // Sleep for a bit + Thread.sleep(1000); + } + // the ref file blocks keys should actually resolve to the referred file blocks, + // so we should not see additional blocks in the cache. + Waiter.waitFor(conf, 300, () -> bc.getBackingMap().size() == 6); + + BlockCacheKey refCacheKey = new BlockCacheKey(ref.getName(), 0); + Cacheable result = bc.getBlock(refCacheKey, true, false, true); + assertNotNull(result); + BlockCacheKey fileCacheKey = new BlockCacheKey(file.getPath().getName(), 0); + assertEquals(result, bc.getBlock(fileCacheKey, true, false, true)); + assertNull(bc.getBackingMap().get(refCacheKey)); + assertNotNull(bc.getBlockForReference(refCacheKey)); + } + @Test public void testPrefetchInterruptOnCapacity() throws Exception { conf.setLong(BUCKET_CACHE_SIZE_KEY, 1); @@ -270,10 +327,19 @@ private Path writeStoreFile(String fname, int numKVs) throws IOException { return writeStoreFile(fname, meta, numKVs); } + private Path writeStoreFile(int numKVs, Path regionCFDir) throws IOException { + HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); + return writeStoreFile(meta, numKVs, regionCFDir); + } + private Path writeStoreFile(String fname, HFileContext context, int numKVs) throws IOException { - Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), fname); + return writeStoreFile(context, numKVs, new Path(TEST_UTIL.getDataTestDir(), fname)); + } + + private Path writeStoreFile(HFileContext context, int numKVs, Path regionCFDir) + throws IOException { StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(storeFileParentDir).withFileContext(context).build(); + .withOutputDir(regionCFDir).withFileContext(context).build(); Random rand = ThreadLocalRandom.current(); final int rowLen = 32; for (int i = 0; i < numKVs; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index d60d2c53ef6d..b3ac553582b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -49,6 +49,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category({ IOTests.class, MediumTests.class }) public class TestBucketCachePersister { @@ -61,6 +63,8 @@ public class TestBucketCachePersister { public int constructedBlockSize = 16 * 1024; + private static final Logger LOG = 
LoggerFactory.getLogger(TestBucketCachePersister.class); + public int[] constructedBlockSizes = new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024 }; @@ -166,6 +170,7 @@ public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception { HFile.createReader(fs, storeFile, cacheConf, true, conf); boolean evicted = false; while (!PrefetchExecutor.isCompleted(storeFile)) { + LOG.debug("Entered loop as prefetch for {} is still running.", storeFile); if (bucketCache.backingMap.size() > 0 && !evicted) { Iterator> it = bucketCache.backingMap.entrySet().iterator(); @@ -174,6 +179,7 @@ public void testPrefetchBlockEvictionWhilePrefetchRunning() throws Exception { while (it.hasNext() && !evicted) { if (entry.getKey().getBlockType().equals(BlockType.DATA)) { evicted = bucketCache.evictBlock(it.next().getKey()); + LOG.debug("Attempted eviction for {}. Succeeded? {}", storeFile, evicted); } } } From 67cc82029c1b3b06f224b825641163d2db2188d4 Mon Sep 17 00:00:00 2001 From: d-c-manning <67607031+d-c-manning@users.noreply.github.com> Date: Wed, 19 Jun 2024 12:21:18 -0700 Subject: [PATCH 422/514] HBASE-28663 Graceful shutdown of CanaryTool timeouts (#5991) Signed-off-by: Viraj Jasani Signed-off-by: Mihir Monani --- .../apache/hadoop/hbase/tool/CanaryTool.java | 41 ++++++++++++++-- .../hadoop/hbase/tool/TestCanaryTool.java | 48 +++++++++++++++++++ 2 files changed, 86 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 92dca7c24c92..21e9edfe0688 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -198,6 +198,10 @@ public interface Sink { long getWriteSuccessCount(); long incWriteSuccessCount(); + + void stop(); + + boolean isStopped(); } /** @@ -208,6 +212,7 @@ public static class StdOutSink implements Sink { readSuccessCount = new AtomicLong(0), writeSuccessCount = new AtomicLong(0); private Map readFailures = new ConcurrentHashMap<>(); private Map writeFailures = new ConcurrentHashMap<>(); + private volatile boolean stopped = false; @Override public long getReadFailureCount() { @@ -268,6 +273,15 @@ public long getWriteSuccessCount() { public long incWriteSuccessCount() { return writeSuccessCount.incrementAndGet(); } + + public void stop() { + stopped = true; + } + + @Override + public boolean isStopped() { + return stopped; + } } /** @@ -444,6 +458,9 @@ public ZookeeperTask(Connection connection, String host, String znode, int timeo @Override public Void call() throws Exception { + if (this.sink.isStopped()) { + return null; + } ZooKeeper zooKeeper = null; try { zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance); @@ -498,6 +515,9 @@ public enum TaskType { @Override public Void call() { + if (this.sink.isStopped()) { + return null; + } switch (taskType) { case READ: return read(); @@ -685,6 +705,9 @@ static class RegionServerTask implements Callable { @Override public Void call() { + if (this.sink.isStopped()) { + return null; + } TableName tableName = null; Table table = null; Get get = null; @@ -1075,6 +1098,7 @@ private int runMonitor(String[] monitorTargets) throws Exception { if (currentTimeLength > timeout) { LOG.error("The monitor is running too long (" + currentTimeLength + ") after timeout limit:" + timeout 
+ " will be killed itself !!"); + monitorThread.interrupt(); if (monitor.initialized) { return TIMEOUT_ERROR_EXIT_CODE; } else { @@ -1113,6 +1137,15 @@ public Map getWriteFailures() { return sink.getWriteFailures(); } + /** + * Return a CanaryTool.Sink object containing the detailed results of the canary run. The Sink may + * not have been created if a Monitor thread is not yet running. + * @return the active Sink if one exists, null otherwise. + */ + public Sink getActiveSink() { + return sink; + } + private void printUsageAndExit() { System.err.println( "Usage: canary [OPTIONS] [ [ [ regions = testingUtility.getAdmin().getRegions(tableName); + assertTrue("verify table has multiple regions", regions.size() > 1); + HRegionServer regionserver = testingUtility.getMiniHBaseCluster().getRegionServer(0); + for (RegionInfo region : regions) { + closeRegion(testingUtility, regionserver, region); + } + + // Run CanaryTool with 1 thread. This thread will attempt to scan the first region. + // It will use default rpc retries and receive NotServingRegionExceptions for many seconds + // according to HConstants.RETRY_BACKOFF. The CanaryTool timeout is set to 4 seconds, so it + // will time out before the first region scan is complete. + ExecutorService executor = new ScheduledThreadPoolExecutor(1); + CanaryTool canary = new CanaryTool(executor); + String[] args = { "-t", "4000", tableName.getNameAsString() }; + int retCode = ToolRunner.run(testingUtility.getConfiguration(), canary, args); + executor.shutdown(); + try { + if (!executor.awaitTermination(3, TimeUnit.SECONDS)) { + executor.shutdownNow(); + } + } catch (InterruptedException e) { + executor.shutdownNow(); + } + + CanaryTool.Sink sink = canary.getActiveSink(); + assertEquals("verify canary timed out with TIMEOUT_ERROR_EXIT_CODE", 3, retCode); + assertEquals("verify only the first region failed", 1, sink.getReadFailureCount()); + assertEquals("verify no successful reads", 0, sink.getReadSuccessCount()); + assertEquals("verify we were attempting to scan all regions", regions.size(), + ((CanaryTool.RegionStdOutSink) sink).getTotalExpectedRegions()); + } + @Test public void testCanaryRegionTaskReadAllCF() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); From 62e7fe8fdae6ba1f26424d81fd36ee9e9e02e0a3 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 20 Jun 2024 14:13:51 +0800 Subject: [PATCH 423/514] HBASE-28617 Addendum update doap --- src/site/resources/doap_Hbase.rdf | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/site/resources/doap_Hbase.rdf b/src/site/resources/doap_Hbase.rdf index 47c10d1a6328..09f41fd3655a 100644 --- a/src/site/resources/doap_Hbase.rdf +++ b/src/site/resources/doap_Hbase.rdf @@ -21,24 +21,24 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 2012-04-14 Apache HBase - - + + Apache HBase software is the Hadoop database. Think of it as a distributed, scalable, big data store. Use Apache HBase software when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware. HBase is an open-source, distributed, versioned, column-oriented store modeled after Google's Bigtable: A Distributed Storage System for Structured Data by Chang et al. 
Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop and HDFS. - - - + + + Java - + - Apache hbase - 2015-07-23 - 2.0.0-SNAPSHOT + Apache HBase + 2024-05-17 + 2.6.0 From 52eef65d37ebf685731559556c4fac8baf06d7bc Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 20 Jun 2024 22:09:32 +0800 Subject: [PATCH 424/514] HBASE-28548 Add documentation about the URI based connection registry (#5981) Signed-off-by: Nick Dimiduk --- src/main/asciidoc/_chapters/architecture.adoc | 68 ++++++++++++++++++- .../asciidoc/_chapters/configuration.adoc | 35 +++++++--- 2 files changed, 92 insertions(+), 11 deletions(-) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index 4aead5e3e841..08f353972468 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -357,7 +357,7 @@ as bootstrap nodes, not only masters . Support refreshing bootstrap nodes, for spreading loads across the nodes in the cluster, and also remove the dead nodes in bootstrap nodes. -To explicitly enable the Master-based registry, use +To explicitly enable the rpc-based registry, use [source, xml] ---- @@ -417,6 +417,72 @@ configuration to fallback to the ZooKeeper based connection registry implementat ---- +[[client.connectionuri]] +=== Connection URI +Starting from 2.7.0, we add the support for specifying the connection information for a HBase +cluster through an URI, which we call a "connection URI". And we've added several methods in +_ConnectionFactory_ to let you get a connection to the cluster specified by the URI. It looks +like: + +[source, java] +---- + URI uri = new URI("hbase+rpc://server1:16020,server2:16020,server3:16020"); + try (Connection conn = ConnectionFactory.createConnection(uri)) { + ... + } +---- + +==== Supported Schemes +Currently there are two schemes supported, _hbase+rpc_ for _RpcConnectionRegistry_ and _hbase+zk_ +for _ZKConnectionRegistry_. _MasterRegistry_ is deprecated so we do not expose it through +connection URI. + +For _hbase+rpc_, it looks like +[source, shell] +---- +hbase+rpc://server1:16020,server2:16020,server3:16020 +---- + +The authority part _server1:16020,server2:16020,server3:16020_ specifies the bootstrap nodes and +their rpc ports, i.e, the configuration value for _hbase.client.bootstrap.servers_ in the past. + +For _hbase+zk_, it looks like +[source, shell] +---- +hbase+zk://zk1:2181,zk2:2181,zk3:2181/hbase +---- + +The authority part _zk1:2181,zk2:2181,zk3:2181_ is the zk quorum, i.e, the configuration value +for _hbase.zookeeper.quorum_ in the past. +The path part _/hbase_ is the znode parent, i.e, the configuration value for +_zookeeper.znode.parent_ in the past. + +==== Specify Configuration through URI Queries +To let users fully specify the connection information through a connection URI, we support +specifying configuration values through URI Queries. It looks like: + +[source, shell] +---- +hbase+rpc://server1:16020?hbase.client.operation.timeout=10000 +---- + +In this way you can set the operation timeout to 10 seconds. Notice that, the configuration values +specified in the connection URI will override the ones in the configuration file. + +==== Implement Your Own Connection Registry +We use _ServiceLoader_ to load different connection registry implementations, the entry point is +_org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory_. 
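For illustration only, here is a minimal sketch of what such a factory might look like, assuming a
hypothetical _hbase+custom_ scheme. The method names and signatures below are assumptions made for
the sake of the example (the real interface is IA.Private and may differ), so check the
_ConnectionRegistryURIFactory_ source in your HBase version before relying on it:

[source, java]
----
// Hypothetical factory for a "hbase+custom" scheme. Names and signatures are
// assumptions for illustration only; verify against ConnectionRegistryURIFactory
// in the HBase version you build against.
public class CustomConnectionRegistryURIFactory implements ConnectionRegistryURIFactory {

  @Override
  public String getScheme() {
    // The URI scheme this factory is responsible for.
    return "hbase+custom";
  }

  @Override
  public ConnectionRegistry create(URI uri, Configuration conf, User user) throws IOException {
    // Parse the authority/path/query parts of the URI and build a ConnectionRegistry
    // that knows how to locate the cluster, e.g. by asking an external service.
    return new CustomConnectionRegistry(uri, conf, user); // hypothetical implementation
  }
}
----

The implementation class name would then be listed in a
_META-INF/services/org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory_ file so that
_ServiceLoader_ can discover it at runtime.
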
So if you implement your own +_ConnectionRegistryURIFactory_ which has a different scheme, and register it in the services file, +we can load it at runtime. + +Connection URI is still a very new feature which has not been used extensively in production, so +we do not want to expose the ability to customize _ConnectionRegistryURIFactory_ yet as the API +may be changed frequently in the beginning. + +If you really want to implement your own connection registry, you can use the above way but take +your own risk. + + [[client.filter]] == Client Request Filters diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 8e25bf9ed787..47481ab5c559 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -746,8 +746,8 @@ be changed for particular daemons via the HBase UI. If you are running HBase in standalone mode, you don't need to configure anything for your client to work provided that they are all on the same machine. -Starting release 3.0.0, the default connection registry has been switched to a master based -implementation. Refer to <> for more details about what a connection +Starting release 3.0.0, the default connection registry has been switched to a rpc based +implementation. Refer to <> for more details about what a connection registry is and implications of this change. Depending on your HBase version, following is the expected minimal client configuration. @@ -772,11 +772,11 @@ before they can do anything. This can be configured in the client configuration ---- -==== Starting 3.0.0 release +==== Starting from 3.0.0 release -The default implementation was switched to a master based connection registry. With this -implementation, clients always contact the active or stand-by master RPC end points to fetch the -connection registry information. This means that the clients should have access to the list of +The default implementation was switched to a rpc based connection registry. With this +implementation, by default clients contact the active or stand-by master RPC end points to fetch +the connection registry information. This means that the clients should have access to the list of active and master end points before they can do anything. This can be configured in the client configuration xml as follows: @@ -796,8 +796,22 @@ configuration xml as follows: The configuration value for _hbase.masters_ is a comma separated list of _host:port_ values. If no port value is specified, the default of _16000_ is assumed. -Usually this configuration is kept out in the _hbase-site.xml_ and is picked up by the client from -the `CLASSPATH`. +Of course you are free to specify bootstrap nodes other than masters, like: +[source,xml] +---- + + + + hbase.client.bootstrap.servers + server1:16020,server2:16020,server3:16020 + +---- + +The configuration value for _hbase.client.bootstrap.servers_ is a comma separated list of +_host:port_ values. Notice that port must be specified here. + +Usually these configurations are kept out in the _hbase-site.xml_ and is picked up by the client +from the `CLASSPATH`. If you are configuring an IDE to run an HBase client, you should include the _conf/_ directory on your classpath so _hbase-site.xml_ settings can be found (or add _src/test/resources_ to pick up @@ -827,14 +841,15 @@ in the content of the first _hbase-site.xml_ found on the client's `CLASSPATH`, the _hbase.X.X.X.jar_). 
It is also possible to specify configuration directly without having to read from a _hbase-site.xml_. -For example, to set the ZooKeeper ensemble for the cluster programmatically do as follows: +For example, to set the ZooKeeper ensemble or bootstrap nodes for the cluster programmatically +do as follows: [source,java] ---- Configuration config = HBaseConfiguration.create(); config.set("hbase.zookeeper.quorum", "localhost"); // Until 2.x.y versions // ---- or ---- -config.set("hbase.masters", "localhost:1234"); // Starting 3.0.0 version +config.set("hbase.client.bootstrap.servers", "localhost:1234"); // Starting 3.0.0 version ---- [[config_timeouts]] From 85a8b54213ddd3220df59b6ecdf295c9f891c46f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 21 Jun 2024 12:06:50 +0800 Subject: [PATCH 425/514] HBASE-28644 Use ExtendedCell instead of Cell in KeyValueScanner (#5976) Signed-off-by: Andrew Purtell --- .../apache/hadoop/hbase/client/Result.java | 8 +- .../apache/hadoop/hbase/filter/Filter.java | 13 +- .../hadoop/hbase/filter/KeyOnlyFilter.java | 19 ++- .../org/apache/hadoop/hbase/CellUtil.java | 4 +- .../org/apache/hadoop/hbase/KeyValueUtil.java | 14 +- .../apache/hadoop/hbase/PrivateCellUtil.java | 76 ++++----- .../java/org/apache/hadoop/hbase/RawCell.java | 8 +- .../io/encoding/AbstractDataBlockEncoder.java | 4 +- .../io/encoding/BufferedDataBlockEncoder.java | 24 +-- .../io/encoding/CopyKeyDataBlockEncoder.java | 6 +- .../hbase/io/encoding/DataBlockEncoder.java | 14 +- .../io/encoding/DiffKeyDeltaEncoder.java | 8 +- .../hbase/io/encoding/EncodingState.java | 4 +- .../io/encoding/FastDiffDeltaEncoder.java | 8 +- .../io/encoding/PrefixKeyDeltaEncoder.java | 8 +- .../hbase/io/encoding/RowIndexCodecV1.java | 10 +- .../hbase/io/encoding/RowIndexEncoderV1.java | 8 +- .../hbase/io/encoding/RowIndexSeekerV1.java | 13 +- .../hbase/util/RedundantKVGenerator.java | 6 +- .../hbase/mapreduce/HFileOutputFormat2.java | 5 +- .../hadoop/hbase/mapreduce/PutCombiner.java | 3 +- .../hbase/mapreduce/PutSortReducer.java | 3 +- .../hbase/mapreduce/TextSortReducer.java | 10 +- .../hbase/mapreduce/TestImportExport.java | 3 +- .../hadoop/hbase/io/HalfStoreFileReader.java | 41 ++--- .../hbase/io/hfile/BlockWithScanInfo.java | 8 +- .../io/hfile/CompoundBloomFilterWriter.java | 5 +- .../apache/hadoop/hbase/io/hfile/HFile.java | 8 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 4 +- .../hbase/io/hfile/HFileBlockIndex.java | 40 ++--- .../hbase/io/hfile/HFileDataBlockEncoder.java | 4 +- .../io/hfile/HFileDataBlockEncoderImpl.java | 4 +- .../io/hfile/HFileIndexBlockEncoder.java | 7 +- .../hadoop/hbase/io/hfile/HFileInfo.java | 5 +- .../hbase/io/hfile/HFileReaderImpl.java | 57 +++---- .../hadoop/hbase/io/hfile/HFileScanner.java | 31 ++-- .../hbase/io/hfile/HFileWriterImpl.java | 15 +- .../hbase/io/hfile/NoOpDataBlockEncoder.java | 4 +- .../hbase/io/hfile/NoOpIndexBlockEncoder.java | 21 +-- .../hbase/mob/DefaultMobStoreCompactor.java | 13 +- .../hbase/mob/DefaultMobStoreFlusher.java | 10 +- .../org/apache/hadoop/hbase/mob/MobCell.java | 9 +- .../org/apache/hadoop/hbase/mob/MobFile.java | 7 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 7 +- .../region/WALProcedurePrettyPrinter.java | 3 +- .../hbase/regionserver/AbstractMemStore.java | 71 ++++----- .../CellArrayImmutableSegment.java | 22 +-- .../hbase/regionserver/CellArrayMap.java | 12 +- .../CellChunkImmutableSegment.java | 32 ++-- .../hbase/regionserver/CellChunkMap.java | 15 +- .../hbase/regionserver/CellFlatMap.java | 137 ++++++++-------- 
.../hadoop/hbase/regionserver/CellSet.java | 56 +++---- .../hadoop/hbase/regionserver/CellSink.java | 8 +- .../regionserver/CompactingMemStore.java | 7 +- .../CompositeImmutableSegment.java | 17 +- .../DateTieredMultiFileWriter.java | 4 +- .../hbase/regionserver/DefaultMemStore.java | 5 +- .../hadoop/hbase/regionserver/HMobStore.java | 14 +- .../hadoop/hbase/regionserver/HRegion.java | 147 ++++++++++-------- .../hbase/regionserver/HRegionFileSystem.java | 5 +- .../hadoop/hbase/regionserver/HStore.java | 11 +- .../hadoop/hbase/regionserver/HStoreFile.java | 10 +- .../regionserver/ImmutableMemStoreLAB.java | 8 +- .../hbase/regionserver/KeyValueHeap.java | 29 ++-- .../hbase/regionserver/KeyValueScanner.java | 20 +-- .../hadoop/hbase/regionserver/MemStore.java | 7 +- .../MemStoreCompactorSegmentsIterator.java | 15 +- .../hbase/regionserver/MemStoreLAB.java | 10 +- .../hbase/regionserver/MemStoreLABImpl.java | 27 ++-- .../MemStoreMergerSegmentsIterator.java | 4 +- .../MemStoreSegmentsIterator.java | 4 +- .../hbase/regionserver/MobStoreScanner.java | 4 +- .../hbase/regionserver/MutableSegment.java | 21 +-- .../regionserver/NonLazyKeyValueScanner.java | 9 +- .../NonReversedNonLazyKeyValueScanner.java | 6 +- .../hbase/regionserver/RegionScannerImpl.java | 9 +- .../regionserver/ReversedKeyValueHeap.java | 18 ++- .../regionserver/ReversedMobStoreScanner.java | 6 +- .../regionserver/ReversedStoreScanner.java | 15 +- .../hadoop/hbase/regionserver/Segment.java | 30 ++-- .../hbase/regionserver/SegmentFactory.java | 3 +- .../hbase/regionserver/SegmentScanner.java | 43 ++--- .../regionserver/SnapshotSegmentScanner.java | 20 +-- .../hadoop/hbase/regionserver/StoreFile.java | 6 +- .../hbase/regionserver/StoreFileReader.java | 11 +- .../hbase/regionserver/StoreFileScanner.java | 54 +++---- .../hbase/regionserver/StoreFileWriter.java | 43 ++--- .../hbase/regionserver/StoreFlusher.java | 11 +- .../hbase/regionserver/StoreScanner.java | 65 ++++---- .../hadoop/hbase/regionserver/StoreUtils.java | 3 +- .../regionserver/StripeMultiFileWriter.java | 5 +- .../hbase/regionserver/TimeRangeTracker.java | 4 +- .../regionserver/compactions/Compactor.java | 10 +- .../handler/ParallelSeekHandler.java | 6 +- .../querymatcher/ColumnTracker.java | 23 +-- .../CompactionScanQueryMatcher.java | 8 +- .../querymatcher/DeleteTracker.java | 6 +- ...DropDeletesCompactionScanQueryMatcher.java | 4 +- .../querymatcher/ExplicitColumnTracker.java | 10 +- .../IncludeAllCompactionQueryMatcher.java | 4 +- .../MajorCompactionScanQueryMatcher.java | 4 +- .../MinorCompactionScanQueryMatcher.java | 4 +- .../NewVersionBehaviorTracker.java | 18 +-- .../NormalUserScanQueryMatcher.java | 4 +- .../querymatcher/RawScanQueryMatcher.java | 4 +- .../querymatcher/ScanDeleteTracker.java | 8 +- .../querymatcher/ScanQueryMatcher.java | 35 +++-- .../ScanWildcardColumnTracker.java | 12 +- .../StripeCompactionScanQueryMatcher.java | 4 +- .../querymatcher/UserScanQueryMatcher.java | 24 ++- .../regionserver/wal/SecureWALCellCodec.java | 8 +- .../hbase/regionserver/wal/WALCellCodec.java | 5 +- .../security/access/AccessControlFilter.java | 3 +- .../security/access/AccessController.java | 12 +- .../DefaultVisibilityLabelServiceImpl.java | 4 +- .../visibility/VisibilityController.java | 34 ++-- .../VisibilityNewVersionBehaivorTracker.java | 12 +- .../VisibilityReplicationEndpoint.java | 5 +- .../VisibilityScanDeleteTracker.java | 10 +- .../security/visibility/VisibilityUtils.java | 9 +- .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 3 +- 
.../hadoop/hbase/util/BloomContext.java | 7 +- .../hbase/util/CollectionBackedScanner.java | 32 ++-- .../hadoop/hbase/util/CompressionTest.java | 3 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 5 +- .../hadoop/hbase/util/RowBloomContext.java | 4 +- .../hadoop/hbase/util/RowColBloomContext.java | 6 +- .../RowPrefixFixedLengthBloomContext.java | 6 +- .../wal/BoundedRecoveredHFilesOutputSink.java | 12 +- .../org/apache/hadoop/hbase/wal/WALEdit.java | 2 +- .../hadoop/hbase/wal/WALPrettyPrinter.java | 10 +- .../hbase/HFilePerformanceEvaluation.java | 10 +- .../hadoop/hbase/TestTagRewriteCell.java | 6 +- .../TestFromClientSideScanExcpetion.java | 4 +- .../hbase/io/TestHalfStoreFileReader.java | 5 +- .../io/encoding/TestDataBlockEncoders.java | 7 +- .../encoding/TestSeekToBlockWithEncoders.java | 8 +- .../hadoop/hbase/io/hfile/TestHFile.java | 25 +-- .../hfile/TestHFileBlockHeaderCorruption.java | 10 +- .../io/hfile/TestHFileDataBlockEncoder.java | 8 +- .../TestHFileScannerImplReferenceCount.java | 6 +- .../hfile/TestSeekBeforeWithInlineBlocks.java | 9 +- .../hbase/mob/FaultyMobStoreCompactor.java | 9 +- .../DelegatingKeyValueScanner.java | 19 +-- .../EncodedSeekPerformanceTest.java | 17 +- .../regionserver/KeyValueScanFixture.java | 4 +- .../hbase/regionserver/MockHStoreFile.java | 15 +- .../hbase/regionserver/TestCellFlatSet.java | 88 ++++++----- .../regionserver/TestCellSkipListSet.java | 11 +- .../regionserver/TestCompactingMemStore.java | 5 +- .../regionserver/TestDefaultMemStore.java | 5 +- .../hbase/regionserver/TestHMobStore.java | 9 +- .../hadoop/hbase/regionserver/TestHStore.java | 106 ++++++------- .../hbase/regionserver/TestKeyValueHeap.java | 33 ++-- .../hbase/regionserver/TestMemStoreLAB.java | 3 +- .../TestMemstoreLABWithoutPool.java | 4 +- .../TestSecureBulkLoadManager.java | 3 +- .../hbase/regionserver/TestStoreScanner.java | 21 +-- .../TestUserScanQueryMatcher.java | 7 +- .../wal/AbstractTestWALReplay.java | 3 +- .../hadoop/hbase/util/HFileTestUtil.java | 3 +- 161 files changed, 1350 insertions(+), 1158 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 6d1050196d83..ec1d3f2b1fab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -125,19 +125,19 @@ private Result(boolean readonly) { * Note: You must ensure that the keyvalues are already sorted. 
* @param cells List of cells */ - public static Result create(List cells) { + public static Result create(List cells) { return create(cells, null); } - public static Result create(List cells, Boolean exists) { + public static Result create(List cells, Boolean exists) { return create(cells, exists, false); } - public static Result create(List cells, Boolean exists, boolean stale) { + public static Result create(List cells, Boolean exists, boolean stale) { return create(cells, exists, stale, false); } - public static Result create(List cells, Boolean exists, boolean stale, + public static Result create(List cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) { if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 8140793fc77a..6e2249e83e3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -93,15 +93,18 @@ public ReturnCode filterCell(final Cell c) throws IOException { } /** - * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new Cell - * object must be returned. + * Give the filter a chance to transform the passed Cell. If the Cell is changed a new Cell object + * must be returned. + *

    + * NOTICE: Filter will be evaluate at server side so the returned {@link Cell} + * must be an {@link org.apache.hadoop.hbase.ExtendedCell}, although it is marked as IA.Private. * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() The transformed KeyValue is what is * eventually returned to the client. Most filters will return the passed KeyValue unchanged. * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transformCell(Cell) for an example of a * transformation. Concrete implementers can signal a failure condition in their code by * throwing an {@link IOException}. - * @param v the KeyValue in question - * @return the changed KeyValue + * @param v the Cell in question + * @return the changed Cell * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract public Cell transformCell(final Cell v) throws IOException; @@ -177,6 +180,8 @@ public enum ReturnCode { * the next key it must seek to. After receiving the match code SEEK_NEXT_USING_HINT, the * QueryMatcher would call this function to find out which key it must next seek to. Concrete * implementers can signal a failure condition in their code by throwing an {@link IOException}. + * NOTICE: Filter will be evaluate at server side so the returned {@link Cell} + * must be an {@link org.apache.hadoop.hbase.ExtendedCell}, although it is marked as IA.Private. * @return KeyValue which must be next seeked. return null if the filter is not sure which key to * seek to next. * @throws IOException in case an I/O or an filter specific failure needs to be signaled. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index e2711a774aa5..3cbd2771a62d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -26,9 +26,11 @@ import java.util.Optional; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; @@ -144,7 +146,7 @@ public int hashCode() { return Objects.hash(this.lenAsVal); } - static class KeyOnlyCell implements Cell { + static class KeyOnlyCell implements ExtendedCell { private Cell cell; private int keyLen; private boolean lenAsVal; @@ -267,6 +269,21 @@ public int getTagsLength() { public long heapSize() { return cell.heapSize(); } + + @Override + public void setSequenceId(long seqId) throws IOException { + PrivateCellUtil.setSequenceId(cell, seqId); + } + + @Override + public void setTimestamp(long ts) throws IOException { + PrivateCellUtil.setTimestamp(cell, ts); + } + + @Override + public void setTimestamp(byte[] ts) throws IOException { + PrivateCellUtil.setTimestamp(cell, ts); + } } static class KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 80dcf8c505db..85f23550efc7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -827,6 +827,8 @@ public static void cloneIfNecessary(ArrayList cells) { } public static Cell cloneIfNecessary(Cell cell) { - return (cell instanceof ByteBufferExtendedCell ? KeyValueUtil.copyToNewKeyValue(cell) : cell); + return (cell instanceof ByteBufferExtendedCell + ? KeyValueUtil.copyToNewKeyValue((ExtendedCell) cell) + : cell); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 71f1da9a8a67..4b61688abc28 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -99,7 +99,7 @@ public static KeyValue copyToNewKeyValue(final Cell cell) { * The position will be set to the beginning of the new ByteBuffer * @return the Bytebuffer containing the key part of the cell */ - public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) { + public static ByteBuffer copyKeyToNewByteBuffer(final ExtendedCell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); ByteBuffer buffer = ByteBuffer.wrap(bytes); @@ -110,7 +110,7 @@ public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) { * Copies the key to a new KeyValue * @return the KeyValue that consists only the key part of the incoming cell */ - public static KeyValue toNewKeyCell(final Cell cell) { + public static KeyValue toNewKeyCell(final ExtendedCell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); KeyValue kv = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); @@ -163,7 +163,7 @@ public static int appendToByteArray(Cell cell, byte[] output, int offset, boolea /** * Copy the Cell content into the passed buf in KeyValue serialization format. 
*/ - public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTags) { + public static int appendTo(ExtendedCell cell, ByteBuffer buf, int offset, boolean withTags) { offset = ByteBufferUtils.putInt(buf, offset, keyLength(cell));// Key length offset = ByteBufferUtils.putInt(buf, offset, cell.getValueLength());// Value length offset = appendKeyTo(cell, buf, offset); @@ -176,7 +176,7 @@ public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTa return offset; } - public static int appendKeyTo(Cell cell, ByteBuffer buf, int offset) { + public static int appendKeyTo(ExtendedCell cell, ByteBuffer buf, int offset) { offset = ByteBufferUtils.putShort(buf, offset, cell.getRowLength());// RK length offset = CellUtil.copyRowTo(cell, buf, offset);// Row bytes offset = ByteBufferUtils.putByte(buf, offset, cell.getFamilyLength());// CF length @@ -433,10 +433,10 @@ public static KeyValue ensureKeyValue(final Cell cell) { } @Deprecated - public static List ensureKeyValues(List cells) { - List lazyList = Lists.transform(cells, new Function() { + public static List ensureKeyValues(List cells) { + List lazyList = Lists.transform(cells, new Function() { @Override - public KeyValue apply(Cell arg0) { + public KeyValue apply(ExtendedCell arg0) { return KeyValueUtil.ensureKeyValue(arg0); } }); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index 58c4b2d1cf15..9013f9a9f26c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -101,19 +101,19 @@ public static ByteBuffer getValueBufferShallowCopy(Cell cell) { } /** Returns A new cell which is having the extra tags also added to it. */ - public static Cell createCell(Cell cell, List tags) { + public static ExtendedCell createCell(ExtendedCell cell, List tags) { return createCell(cell, TagUtil.fromList(tags)); } /** Returns A new cell which is having the extra tags also added to it. */ - public static Cell createCell(Cell cell, byte[] tags) { + public static ExtendedCell createCell(ExtendedCell cell, byte[] tags) { if (cell instanceof ByteBufferExtendedCell) { return new TagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) cell, tags); } return new TagRewriteCell(cell, tags); } - public static Cell createCell(Cell cell, byte[] value, byte[] tags) { + public static ExtendedCell createCell(ExtendedCell cell, byte[] value, byte[] tags) { if (cell instanceof ByteBufferExtendedCell) { return new ValueAndTagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) cell, value, tags); @@ -127,7 +127,7 @@ public static Cell createCell(Cell cell, byte[] value, byte[] tags) { * other parts, refer to the original Cell. */ static class TagRewriteCell implements ExtendedCell { - protected Cell cell; + protected ExtendedCell cell; protected byte[] tags; private static final int HEAP_SIZE_OVERHEAD = ClassSize.OBJECT + 2 * ClassSize.REFERENCE; @@ -136,8 +136,7 @@ static class TagRewriteCell implements ExtendedCell { * @param cell The original Cell which it rewrites * @param tags the tags bytes. The array suppose to contain the tags bytes alone. 
*/ - public TagRewriteCell(Cell cell, byte[] tags) { - assert cell instanceof ExtendedCell; + public TagRewriteCell(ExtendedCell cell, byte[] tags) { assert tags != null; this.cell = cell; this.tags = tags; @@ -303,7 +302,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); return new TagRewriteCell(clonedBaseCell, this.tags); } } @@ -482,7 +481,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); if (clonedBaseCell instanceof ByteBufferExtendedCell) { return new TagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) clonedBaseCell, this.tags); @@ -545,7 +544,7 @@ static class ValueAndTagRewriteCell extends TagRewriteCell { protected byte[] value; - public ValueAndTagRewriteCell(Cell cell, byte[] value, byte[] tags) { + public ValueAndTagRewriteCell(ExtendedCell cell, byte[] value, byte[] tags) { super(cell, tags); this.value = value; } @@ -618,7 +617,7 @@ public void write(ByteBuffer buf, int offset) { * Made into a static method so as to reuse the logic within * ValueAndTagRewriteByteBufferExtendedCell */ - static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] tags) { + static void write(ByteBuffer buf, int offset, ExtendedCell cell, byte[] value, byte[] tags) { offset = ByteBufferUtils.putInt(buf, offset, KeyValueUtil.keyLength(cell));// Key length offset = ByteBufferUtils.putInt(buf, offset, value.length);// Value length offset = KeyValueUtil.appendKeyTo(cell, buf, offset); @@ -633,7 +632,7 @@ static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] ta @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); return new ValueAndTagRewriteCell(clonedBaseCell, this.value, this.tags); } } @@ -699,7 +698,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = this.cell.deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); if (clonedBaseCell instanceof ByteBufferExtendedCell) { return new ValueAndTagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) clonedBaseCell, this.value, this.tags); @@ -837,7 +836,7 @@ public static boolean isDelete(final byte type) { } /** Returns True if this cell is a {@link KeyValue.Type#Delete} type. 
*/ - public static boolean isDeleteType(Cell cell) { + public static boolean isDeleteType(ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.Delete.getCode(); } @@ -845,20 +844,20 @@ public static boolean isDeleteFamily(final Cell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteFamily.getCode(); } - public static boolean isDeleteFamilyVersion(final Cell cell) { + public static boolean isDeleteFamilyVersion(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode(); } - public static boolean isDeleteColumns(final Cell cell) { + public static boolean isDeleteColumns(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteColumn.getCode(); } - public static boolean isDeleteColumnVersion(final Cell cell) { + public static boolean isDeleteColumnVersion(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.Delete.getCode(); } /** Returns True if this cell is a delete family or column type. */ - public static boolean isDeleteColumnOrFamily(Cell cell) { + public static boolean isDeleteColumnOrFamily(ExtendedCell cell) { int t = cell.getTypeByte(); return t == KeyValue.Type.DeleteColumn.getCode() || t == KeyValue.Type.DeleteFamily.getCode(); } @@ -1185,8 +1184,9 @@ public static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFami * special API used in scan optimization. */ // compare a key against row/fam/qual/ts/type - public static final int compareKeyBasedOnColHint(CellComparator comparator, Cell nextIndexedCell, - Cell currentCell, int foff, int flen, byte[] colHint, int coff, int clen, long ts, byte type) { + public static final int compareKeyBasedOnColHint(CellComparator comparator, + ExtendedCell nextIndexedCell, ExtendedCell currentCell, int foff, int flen, byte[] colHint, + int coff, int clen, long ts, byte type) { int compare = comparator.compareRows(nextIndexedCell, currentCell); if (compare != 0) { return compare; @@ -2564,7 +2564,7 @@ public static BigDecimal getValueAsBigDecimal(Cell cell) { * @param tagCompressionContext the TagCompressionContext * @throws IOException can throw IOException if the compression encounters issue */ - public static void compressTags(OutputStream out, Cell cell, + public static void compressTags(OutputStream out, ExtendedCell cell, TagCompressionContext tagCompressionContext) throws IOException { if (cell instanceof ByteBufferExtendedCell) { tagCompressionContext.compressTags(out, ((ByteBufferExtendedCell) cell).getTagsByteBuffer(), @@ -2716,7 +2716,7 @@ static final int compareWithoutRow(CellComparator comparator, Cell left, byte[] * Return a new cell is located following input cell. If both of type and timestamp are minimum, * the input cell will be returned directly. 
*/ - public static Cell createNextOnRowCol(Cell cell) { + public static ExtendedCell createNextOnRowCol(ExtendedCell cell) { long ts = cell.getTimestamp(); byte type = cell.getTypeByte(); if (type != KeyValue.Type.Minimum.getCode()) { @@ -2730,7 +2730,7 @@ public static Cell createNextOnRowCol(Cell cell) { return createNextOnRowCol(cell, ts, type); } - static Cell createNextOnRowCol(Cell cell, long ts, byte type) { + static ExtendedCell createNextOnRowCol(ExtendedCell cell, long ts, byte type) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2794,7 +2794,7 @@ public static int estimatedSerializedSizeOfKey(final Cell cell) { * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed * a null cell */ - public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) { + public static byte[] getCellKeySerializedAsKeyValueKey(final ExtendedCell cell) { if (cell == null) return null; byte[] b = new byte[KeyValueUtil.keyLength(cell)]; KeyValueUtil.appendKeyTo(cell, b, 0); @@ -2805,7 +2805,7 @@ public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) { * Create a Cell that is smaller than all other possible Cells for the given Cell's row. * @return First possible Cell on passed Cell's row. */ - public static Cell createFirstOnRow(final Cell cell) { + public static ExtendedCell createFirstOnRow(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2814,26 +2814,27 @@ public static Cell createFirstOnRow(final Cell cell) { return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); } - public static Cell createFirstOnRow(final byte[] row, int roffset, short rlength) { + public static ExtendedCell createFirstOnRow(final byte[] row, int roffset, short rlength) { return new FirstOnRowCell(row, roffset, rlength); } - public static Cell createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) { + public static ExtendedCell createFirstOnRow(final byte[] row, final byte[] family, + final byte[] col) { return createFirstOnRow(row, 0, (short) row.length, family, 0, (byte) family.length, col, 0, col.length); } - public static Cell createFirstOnRow(final byte[] row, int roffset, short rlength, + public static ExtendedCell createFirstOnRow(final byte[] row, int roffset, short rlength, final byte[] family, int foffset, byte flength, final byte[] col, int coffset, int clength) { return new FirstOnRowColCell(row, roffset, rlength, family, foffset, flength, col, coffset, clength); } - public static Cell createFirstOnRow(final byte[] row) { + public static ExtendedCell createFirstOnRow(final byte[] row) { return createFirstOnRow(row, 0, (short) row.length); } - public static Cell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) { + public static ExtendedCell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2844,7 +2845,7 @@ public static Cell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, in fArray, foff, (byte) flen, HConstants.EMPTY_BYTE_ARRAY, 0, 0); } - public static Cell createFirstOnRowCol(final Cell cell) { + public static ExtendedCell createFirstOnRowCol(final Cell cell) { if (cell instanceof 
ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2858,7 +2859,7 @@ public static Cell createFirstOnRowCol(final Cell cell) { cell.getQualifierLength()); } - public static Cell createFirstOnNextRow(final Cell cell) { + public static ExtendedCell createFirstOnNextRow(final Cell cell) { byte[] nextRow = new byte[cell.getRowLength() + 1]; CellUtil.copyRowTo(cell, nextRow, 0); nextRow[nextRow.length - 1] = 0;// maybe not necessary @@ -2870,7 +2871,8 @@ public static Cell createFirstOnNextRow(final Cell cell) { * passed qualifier. * @return Last possible Cell on passed Cell's rk:cf and passed qualifier. */ - public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) { + public static ExtendedCell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, + int qlength) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2890,7 +2892,7 @@ public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffe * combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored. * @param cell - cell */ - public static Cell createFirstOnRowColTS(Cell cell, long ts) { + public static ExtendedCell createFirstOnRowColTS(Cell cell, long ts) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColTSByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2909,7 +2911,7 @@ public static Cell createFirstOnRowColTS(Cell cell, long ts) { * Create a Cell that is larger than all other possible Cells for the given Cell's row. * @return Last possible Cell on passed Cell's row. */ - public static Cell createLastOnRow(final Cell cell) { + public static ExtendedCell createLastOnRow(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowByteBufferExtendedCell(((ByteBufferExtendedCell) cell).getRowByteBuffer(), ((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength()); @@ -2917,7 +2919,7 @@ public static Cell createLastOnRow(final Cell cell) { return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); } - public static Cell createLastOnRow(final byte[] row) { + public static ExtendedCell createLastOnRow(final byte[] row) { return new LastOnRowCell(row, 0, (short) row.length); } @@ -2927,7 +2929,7 @@ public static Cell createLastOnRow(final byte[] row) { * we already know is not in the file. * @return Last possible Cell on passed Cell's rk:cf:q. */ - public static Cell createLastOnRowCol(final Cell cell) { + public static ExtendedCell createLastOnRowCol(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2949,7 +2951,7 @@ public static Cell createLastOnRowCol(final Cell cell) { * @param fam - family name * @return First Delete Family possible key on passed row. 
*/ - public static Cell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) { + public static ExtendedCell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) { return new FirstOnRowDeleteFamilyCell(row, fam); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index b4d26dbfee93..5ba344770a3d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -38,7 +38,7 @@ public interface RawCell extends Cell { * @return the byte[] having the tags */ default byte[] cloneTags() { - return PrivateCellUtil.cloneTags(this); + return PrivateCellUtil.cloneTags((ExtendedCell) this); } /** @@ -46,7 +46,7 @@ default byte[] cloneTags() { * @return a list of tags */ default Iterator getTags() { - return PrivateCellUtil.tagsIterator(this); + return PrivateCellUtil.tagsIterator((ExtendedCell) this); } /** @@ -55,7 +55,7 @@ default Iterator getTags() { * @return the specific tag if available or null */ default Optional getTag(byte type) { - return PrivateCellUtil.getTag(this, type); + return PrivateCellUtil.getTag((ExtendedCell) this, type); } /** @@ -71,6 +71,6 @@ public static void checkForTagsLength(int tagsLength) { /** Returns A new cell which is having the extra tags also added to it. */ public static Cell createCell(Cell cell, List tags) { - return PrivateCellUtil.createCell(cell, tags); + return PrivateCellUtil.createCell((ExtendedCell) cell, tags); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java index c551d2aabd54..b7b70e25ce40 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java @@ -21,7 +21,7 @@ import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -50,7 +50,7 @@ protected void postEncoding(HFileBlockEncodingContext encodingCtx) throws IOExce } } - protected Cell createFirstKeyCell(ByteBuffer key, int keyLength) { + protected ExtendedCell createFirstKeyCell(ByteBuffer key, int keyLength) { if (key.hasArray()) { return new KeyValue.KeyOnlyKeyValue(key.array(), key.arrayOffset() + key.position(), keyLength); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index 1794422d5cd9..5ec39fa5803d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -281,7 +281,7 @@ protected void copyFromNext(SeekerState nextState) { } } - public Cell toCell() { + public ExtendedCell toCell() { // Buffer backing the value and tags part from the HFileBlock's buffer // When tag compression in use, this will be only the value bytes area. 
ByteBuffer valAndTagsBuffer; @@ -304,7 +304,7 @@ public Cell toCell() { } } - private Cell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, + private ExtendedCell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, int tagsLenSerializationSize) { byte[] tagsArray = HConstants.EMPTY_BYTE_ARRAY; int tOffset = 0; @@ -326,7 +326,7 @@ private Cell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, this.tagsLength); } - private Cell toOffheapCell(ByteBuffer valAndTagsBuffer, int vOffset, + private ExtendedCell toOffheapCell(ByteBuffer valAndTagsBuffer, int vOffset, int tagsLenSerializationSize) { ByteBuffer tagsBuf = HConstants.EMPTY_BYTE_BUFFER; int tOffset = 0; @@ -825,7 +825,7 @@ public BufferedEncodedSeeker(HFileBlockDecodingContext decodingCtx) { } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { keyOnlyKV.setKey(current.keyBuffer, 0, current.keyLength); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, keyOnlyKV); } @@ -853,7 +853,7 @@ public void setCurrentBuffer(ByteBuff buffer) { } @Override - public Cell getKey() { + public ExtendedCell getKey() { byte[] key = new byte[current.keyLength]; System.arraycopy(current.keyBuffer, 0, key, 0, current.keyLength); return new KeyValue.KeyOnlyKeyValue(key); @@ -869,7 +869,7 @@ public ByteBuffer getValueShallowCopy() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { return current.toCell(); } @@ -927,7 +927,7 @@ protected void decodeTags() { } @Override - public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { + public int seekToKeyInBlock(ExtendedCell seekCell, boolean seekBefore) { int rowCommonPrefix = 0; int familyCommonPrefix = 0; int qualCommonPrefix = 0; @@ -1020,7 +1020,7 @@ public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { return 1; } - private int compareTypeBytes(Cell key, Cell right) { + private int compareTypeBytes(ExtendedCell key, ExtendedCell right) { if ( key.getFamilyLength() + key.getQualifierLength() == 0 && key.getTypeByte() == KeyValue.Type.Minimum.getCode() @@ -1129,7 +1129,7 @@ protected STATE createSeekerState() { } /** Returns unencoded size added */ - protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out, + protected final int afterEncodingKeyValue(ExtendedCell cell, DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) throws IOException { int size = 0; if (encodingCtx.getHFileContext().isIncludesTags()) { @@ -1245,7 +1245,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { EncodingState state = encodingCtx.getEncodingState(); int posBeforeEncode = out.size(); @@ -1253,8 +1253,8 @@ public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputS state.postCellEncode(encodedKvSize, out.size() - posBeforeEncode); } - public abstract int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCtx, - DataOutputStream out) throws IOException; + public abstract int internalEncode(ExtendedCell cell, + HFileBlockDefaultEncodingContext encodingCtx, DataOutputStream out) throws IOException; @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java index 5abe65dc4f97..1ec21cea6664 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java @@ -21,7 +21,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; @@ -56,7 +56,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { CopyKeyEncodingState state = (CopyKeyEncodingState) encodingContext.getEncodingState(); NoneEncoder encoder = state.encoder; @@ -64,7 +64,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { int keyLength = block.getIntAfterPosition(Bytes.SIZEOF_INT); int pos = 3 * Bytes.SIZEOF_INT; ByteBuffer key = block.asSubByteBuffer(pos + keyLength).duplicate(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index 52825b6c683d..78ebbce38306 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -22,8 +22,8 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; @@ -55,7 +55,7 @@ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream * Encodes a KeyValue. After the encode, {@link EncodingState#postCellEncode(int, int)} needs to * be called to keep track of the encoded and unencoded data size */ - void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; /** @@ -81,7 +81,7 @@ ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext dec * @param block encoded block we want index, the position will not change * @return First key in block as a cell. */ - Cell getFirstKeyCellInBlock(ByteBuff block); + ExtendedCell getFirstKeyCellInBlock(ByteBuff block); /** * Create a HFileBlock seeker which find KeyValues within a block. @@ -119,7 +119,7 @@ interface EncodedSeeker { * From the current position creates a cell using the key part of the current buffer * @return key at current position */ - Cell getKey(); + ExtendedCell getKey(); /** * Does a shallow copy of the value at the current position. 
A shallow copy is possible because @@ -129,7 +129,7 @@ interface EncodedSeeker { ByteBuffer getValueShallowCopy(); /** Returns the Cell at the current position. Includes memstore timestamp. */ - Cell getCell(); + ExtendedCell getCell(); /** Set position to beginning of given block */ void rewind(); @@ -154,12 +154,12 @@ interface EncodedSeeker { * Does not matter in case of an inexact match. * @return 0 on exact match, 1 on inexact match. */ - int seekToKeyInBlock(Cell key, boolean seekBefore); + int seekToKeyInBlock(ExtendedCell key, boolean seekBefore); /** * Compare the given key against the current key * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater */ - public int compareKey(CellComparator comparator, Cell key); + public int compareKey(CellComparator comparator, ExtendedCell key); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java index e865d0b12523..d58f5e2c923e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java @@ -21,7 +21,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -181,7 +181,7 @@ private void uncompressSingleKeyValue(DataInputStream source, ByteBuffer buffer, } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { EncodingState state = encodingContext.getEncodingState(); int size = compressSingleKeyValue(out, cell, state.prevCell); @@ -190,7 +190,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell) + private int compressSingleKeyValue(DataOutputStream out, ExtendedCell cell, ExtendedCell prevCell) throws IOException { int flag = 0; // Do not use more bits that can fit into a byte int kLength = KeyValueUtil.keyLength(cell); @@ -291,7 +291,7 @@ private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCel } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT); byte familyLength = block.get(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java index 8aeb1824eb30..8ab4e320552e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.encoding; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +30,7 @@ public class EncodingState { /** * The previous Cell the encoder encoded. 
*/ - protected Cell prevCell = null; + protected ExtendedCell prevCell = null; // Size of actual data being written. Not considering the block encoding/compression. This // includes the header size also. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java index df3d6c34216b..26b695abfca9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java @@ -22,7 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -215,7 +215,7 @@ private void uncompressSingleKeyValue(DataInputStream source, ByteBuffer out, } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { EncodingState state = encodingContext.getEncodingState(); int size = compressSingleKeyValue(out, cell, state.prevCell); @@ -224,7 +224,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell) + private int compressSingleKeyValue(DataOutputStream out, ExtendedCell cell, ExtendedCell prevCell) throws IOException { int flag = 0; // Do not use more bits than will fit into a byte int kLength = KeyValueUtil.keyLength(cell); @@ -330,7 +330,7 @@ protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocat } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE); int keyLength = ByteBuff.readCompressedInt(block); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 820dd6179542..e9858b5ffba1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -22,7 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -42,7 +42,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder { @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { int klength = KeyValueUtil.keyLength(cell); int vlength = cell.getValueLength(); @@ -69,7 +69,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream 
out) + private void writeKeyExcludingCommon(ExtendedCell cell, int commonPrefix, DataOutputStream out) throws IOException { short rLen = cell.getRowLength(); if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) { @@ -162,7 +162,7 @@ private int decodeKeyValue(DataInputStream source, ByteBuffer buffer, int prevKe } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT); int keyLength = ByteBuff.readCompressedInt(block); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java index 832fd93db712..9f2014331089 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java @@ -23,7 +23,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -72,7 +72,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { RowIndexEncodingState state = (RowIndexEncodingState) encodingCtx.getEncodingState(); RowIndexEncoderV1 encoder = state.encoder; @@ -104,7 +104,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont } else { RowIndexSeekerV1 seeker = new RowIndexSeekerV1(decodingCtx); seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer)); - List kvs = new ArrayList<>(); + List kvs = new ArrayList<>(); kvs.add(seeker.getCell()); while (seeker.next()) { kvs.add(seeker.getCell()); @@ -112,7 +112,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); try (DataOutputStream out = new DataOutputStream(baos)) { - for (Cell cell : kvs) { + for (ExtendedCell cell : kvs) { KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(cell); out.write(currentCell.getBuffer(), currentCell.getOffset(), currentCell.getLength()); if (includesMvcc) { @@ -126,7 +126,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); int keyLength = block.getInt(); block.getInt(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java index 028473e0897e..7ec4f767ccb8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java @@ -19,7 +19,7 @@ import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.util.Bytes; @@ -32,7 +32,7 @@ public class RowIndexEncoderV1 { private static final Logger LOG = LoggerFactory.getLogger(RowIndexEncoderV1.class); /** The Cell previously appended. */ - private Cell lastCell = null; + private ExtendedCell lastCell = null; private DataOutputStream out; private NoneEncoder encoder; @@ -46,7 +46,7 @@ public RowIndexEncoderV1(DataOutputStream out, HFileBlockDefaultEncodingContext this.context = encodingCtx; } - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { // checkRow uses comparator to check we are writing in order. int extraBytesForRowIndex = 0; @@ -63,7 +63,7 @@ public void write(Cell cell) throws IOException { context.getEncodingState().postCellEncode(size, size + extraBytesForRowIndex); } - protected boolean checkRow(final Cell cell) throws IOException { + protected boolean checkRow(final ExtendedCell cell) throws IOException { boolean isDuplicateRow = false; if (cell == null) { throw new IOException("Key cannot be null or empty"); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index e283803a143b..89bac4a609e9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -81,7 +82,7 @@ public void setCurrentBuffer(ByteBuff buffer) { @Override @SuppressWarnings("ByteBufferBackingArray") - public Cell getKey() { + public ExtendedCell getKey() { if (current.keyBuffer.hasArray()) { return new KeyValue.KeyOnlyKeyValue(current.keyBuffer.array(), current.keyBuffer.arrayOffset() + current.keyBuffer.position(), current.keyLength); @@ -103,7 +104,7 @@ public ByteBuffer getValueShallowCopy() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { return current.toCell(); } @@ -164,7 +165,7 @@ private ByteBuffer getRow(int index) { } @Override - public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { + public int seekToKeyInBlock(ExtendedCell seekCell, boolean seekBefore) { previous.invalidate(); int index = binarySearch(seekCell, seekBefore); if (index < 0) { @@ -230,7 +231,7 @@ private void moveToPrevious() { } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, current.currentKey); } @@ -343,8 +344,8 @@ protected int getCellBufSize() { return kvBufSize; } - public Cell toCell() { - Cell ret; + public ExtendedCell toCell() { + ExtendedCell ret; int cellBufSize = getCellBufSize(); long seqId = 0L; if (includesMvcc()) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java index 0f9203facff9..b39e9cf20ac0 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java +++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java @@ -26,8 +26,8 @@ import java.util.Random; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.io.WritableUtils; @@ -270,8 +270,8 @@ public List generateTestKeyValues(int howMany, boolean useTags) { * @param howMany How many Key values should be generated. * @return sorted list of key values */ - public List generateTestExtendedOffheapKeyValues(int howMany, boolean useTags) { - List result = new ArrayList<>(); + public List generateTestExtendedOffheapKeyValues(int howMany, boolean useTags) { + List result = new ArrayList<>(); List rows = generateRows(); Map> rowsToQualifier = new HashMap<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 2e288f246808..225a2dc6fe2b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; @@ -320,8 +321,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { } // we now have the proper WAL writer. full steam ahead - PrivateCellUtil.updateLatestStamp(cell, this.now); - wl.writer.append(kv); + PrivateCellUtil.updateLatestStamp(kv, this.now); + wl.writer.append((ExtendedCell) kv); wl.written += length; // Copy the row so we know when a row transition. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 5ab4e5a292e9..90dc5c1d555f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; @@ -62,7 +63,7 @@ protected void reduce(K row, Iterable vals, Context context) List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? 
(List) cells : null; for (Cell cell : entry.getValue()) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); curSize += kv.heapSize(); if (kvs != null) { kvs.add(kv); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index 90905090f89d..b4061d6be6a9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -100,7 +101,7 @@ protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, throw new IOException("Invalid visibility expression found in mutation " + p, e); } for (List cells : p.getFamilyCellMap().values()) { - for (Cell cell : cells) { + for (ExtendedCell cell : (List) (List) cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 2fba01978581..b374aa86c018 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -26,8 +26,8 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -168,10 +168,10 @@ protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable li } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. 
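Editor's note on the PutCombiner/PutSortReducer hunks above: they rely on the assumption that every cell carried by a client mutation is in fact an ExtendedCell, so a client-facing List of Cell can be narrowed with an unchecked cast before the internal KeyValueUtil API is called. A minimal sketch of that pattern follows; the class and method names are illustrative and not part of the patch.

import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Put;

public class ExtendedCellCastSketch {
  // Narrow a client-facing List<Cell> to List<ExtendedCell> and convert each entry to a
  // KeyValue, mirroring the reducer code above. Assumes the family is present in the Put.
  static long totalHeapSize(Put put, byte[] family) {
    List<Cell> cells = put.getFamilyCellMap().get(family);
    @SuppressWarnings("unchecked")
    List<ExtendedCell> extendedCells = (List<ExtendedCell>) (List<? extends Cell>) cells;
    long size = 0;
    for (ExtendedCell cell : extendedCells) {
      KeyValue kv = KeyValueUtil.ensureKeyValue(cell); // takes ExtendedCell after this change
      size += kv.heapSize();
    }
    return size;
  }
}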
- Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + ExtendedCell cell = (ExtendedCell) this.kvCreator.create(lineBytes, + parsed.getRowKeyOffset(), parsed.getRowKeyLength(), parser.getFamily(i), 0, + parser.getFamily(i).length, parser.getQualifier(i), 0, parser.getQualifier(i).length, + ts, lineBytes, parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 4d3149b9d8ea..5e3e52de6ad4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -884,7 +885,7 @@ public void preBatchMutate(ObserverContext c, Cell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + Cell updatedCell = PrivateCellUtil.createCell((ExtendedCell) cell, tags); updatedCells.add(updatedCell); } m.getFamilyCellMap().clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 862fbc69809d..55a738020040 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.IntConsumer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -60,9 +60,9 @@ public class HalfStoreFileReader extends StoreFileReader { // i.e. empty column and a timestamp of LATEST_TIMESTAMP. 
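For context on the TestImportExport change just above: PrivateCellUtil.createCell now expects an ExtendedCell, so the test coprocessor casts the scanned Cell before rebuilding it with an extra tag. A rough sketch of that tag-rewriting pattern, assuming (as the test does) that cells observed server-side are ExtendedCells:

import java.util.List;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;

public class TagRewriteSketch {
  // Rebuild a cell with one extra tag appended; mirrors the preBatchMutate hook above.
  static Cell withExtraTag(Cell cell, byte tagType, byte[] tagValue) {
    List<Tag> tags = PrivateCellUtil.getTags(cell);
    tags.add(new ArrayBackedTag(tagType, tagValue));
    return PrivateCellUtil.createCell((ExtendedCell) cell, tags);
  }
}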
protected final byte[] splitkey; - private final Cell splitCell; + private final ExtendedCell splitCell; - private Optional firstKey = Optional.empty(); + private Optional firstKey = Optional.empty(); private boolean firstKeySeeked = false; @@ -104,21 +104,27 @@ protected HFileScanner getScanner(final boolean cacheBlocks, final boolean pread public boolean atEnd = false; @Override - public Cell getKey() { - if (atEnd) return null; + public ExtendedCell getKey() { + if (atEnd) { + return null; + } return delegate.getKey(); } @Override public ByteBuffer getValue() { - if (atEnd) return null; + if (atEnd) { + return null; + } return delegate.getValue(); } @Override - public Cell getCell() { - if (atEnd) return null; + public ExtendedCell getCell() { + if (atEnd) { + return null; + } return delegate.getCell(); } @@ -177,7 +183,7 @@ public boolean isSeeked() { } @Override - public int seekTo(Cell key) throws IOException { + public int seekTo(ExtendedCell key) throws IOException { if (top) { if (PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) { return -1; @@ -199,10 +205,9 @@ public int seekTo(Cell key) throws IOException { } @Override - public int reseekTo(Cell key) throws IOException { + public int reseekTo(ExtendedCell key) throws IOException { // This function is identical to the corresponding seekTo function - // except - // that we call reseekTo (and not seekTo) on the delegate. + // except that we call reseekTo (and not seekTo) on the delegate. if (top) { if (PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) { return -1; @@ -227,9 +232,9 @@ public int reseekTo(Cell key) throws IOException { } @Override - public boolean seekBefore(Cell key) throws IOException { + public boolean seekBefore(ExtendedCell key) throws IOException { if (top) { - Optional fk = getFirstKey(); + Optional fk = getFirstKey(); if ( fk.isPresent() && PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0 @@ -255,7 +260,7 @@ public boolean seekBefore(Cell key) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } @@ -282,7 +287,7 @@ public boolean passesKeyRangeFilter(Scan scan) { } @Override - public Optional getLastKey() { + public Optional getLastKey() { if (top) { return super.getLastKey(); } @@ -303,13 +308,13 @@ public Optional getLastKey() { } @Override - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { // Returns null to indicate file is not splitable. return Optional.empty(); } @Override - public Optional getFirstKey() { + public Optional getFirstKey() { if (!firstKeySeeked) { HFileScanner scanner = getScanner(true, true, false); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index c340254e07c9..37001d93b12d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,9 +31,9 @@ public class BlockWithScanInfo { * The first key in the next block following this one in the HFile. 
If this key is unknown, this * is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ - private final Cell nextIndexedKey; + private final ExtendedCell nextIndexedKey; - public BlockWithScanInfo(HFileBlock hFileBlock, Cell nextIndexedKey) { + public BlockWithScanInfo(HFileBlock hFileBlock, ExtendedCell nextIndexedKey) { this.hFileBlock = hFileBlock; this.nextIndexedKey = nextIndexedKey; } @@ -42,7 +42,7 @@ public HFileBlock getHFileBlock() { return hFileBlock; } - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index bb253e050fe1..78860950be7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -60,7 +61,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** The size of individual Bloom filter chunks to create */ private int chunkByteSize; /** The prev Cell that was processed */ - private Cell prevCell; + private ExtendedCell prevCell; /** A Bloom filter chunk enqueued for writing */ private static class ReadyChunk { @@ -146,7 +147,7 @@ private void enqueueReadyChunk(boolean closing) { } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { Objects.requireNonNull(cell); enqueueReadyChunk(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index ae79ad857244..135c6cfecbcc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.MetricsIO; @@ -395,15 +395,15 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException; - Optional getLastKey(); + Optional getLastKey(); - Optional midKey() throws IOException; + Optional midKey() throws IOException; long length(); long getEntries(); - Optional getFirstKey(); + Optional getFirstKey(); long indexSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index b24976707c33..91678d58b6e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -40,7 +40,7 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -894,7 +894,7 @@ DataOutputStream startWriting(BlockType newBlockType) throws IOException { /** * Writes the Cell to this block */ - void write(Cell cell) throws IOException { + void write(ExtendedCell cell) throws IOException { expectState(State.WRITING); this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 592c19c866cf..816beea8a45f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -145,7 +146,7 @@ public byte[] getRootBlockKey(int i) { } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -221,9 +222,9 @@ public String toString() { */ static class CellBasedKeyBlockIndexReader extends BlockIndexReader { - private Cell[] blockKeys; + private ExtendedCell[] blockKeys; /** Pre-computed mid-key */ - private AtomicReference midKey = new AtomicReference<>(); + private AtomicReference midKey = new AtomicReference<>(); /** Needed doing lookup on blocks. */ protected CellComparator comparator; @@ -258,12 +259,12 @@ public boolean isEmpty() { /** * from 0 to {@link #getRootBlockCount() - 1} */ - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return blockKeys[i]; } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -273,7 +274,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } // the next indexed key - Cell nextIndexedKey = null; + ExtendedCell nextIndexedKey = null; // Read the next-level (intermediate or leaf) index block. 
long currentOffset = blockOffsets[rootLevelIndex]; @@ -381,10 +382,12 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } @Override - public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) throw new IOException("HFile empty"); + public ExtendedCell midkey(CachingBlockReader cachingBlockReader) throws IOException { + if (rootCount == 0) { + throw new IOException("HFile empty"); + } - Cell targetMidKey = this.midKey.get(); + ExtendedCell targetMidKey = this.midKey.get(); if (targetMidKey != null) { return targetMidKey; } @@ -416,7 +419,7 @@ public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { @Override protected void initialize(int numEntries) { - blockKeys = new Cell[numEntries]; + blockKeys = new ExtendedCell[numEntries]; } /** @@ -501,7 +504,7 @@ public boolean isEmpty() { } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -510,14 +513,14 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } @Override - public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { + public ExtendedCell midkey(CachingBlockReader cachingBlockReader) throws IOException { return seeker.midkey(cachingBlockReader); } /** * from 0 to {@link #getRootBlockCount() - 1} */ - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return seeker.getRootBlockKey(i); } @@ -601,9 +604,10 @@ public void ensureNonEmpty() { * the block irrespective of the encoding * @return reader a basic way to load blocks */ - public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, - boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + public HFileBlock seekToDataBlock(final ExtendedCell key, HFileBlock currentBlock, + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader); if (blockWithScanInfo == null) { @@ -625,8 +629,8 @@ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boole * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as * nextIndexedKey. 
*/ - public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, + public abstract BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, + HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 1629536c1488..f05b5415f02e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; @@ -47,7 +47,7 @@ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream /** * Encodes a KeyValue. */ - void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index 6505e3d33fe8..fd1c1adb0d55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -90,7 +90,7 @@ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { this.encoding.getEncoder().encode(cell, encodingCtx, out); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java index a84204cadf1b..99ee8f3554c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import 
org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; @@ -58,15 +59,15 @@ void initRootIndex(HFileBlock blk, int numEntries, CellComparator comparator, in boolean isEmpty(); - Cell getRootBlockKey(int i); + ExtendedCell getRootBlockKey(int i); int getRootBlockCount(); - Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException; + ExtendedCell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException; int rootBlockContainingKey(Cell key); - BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, HFile.CachingBlockReader cachingBlockReader) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 31e637a0099f..fd10df1b9a67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -92,7 +93,7 @@ public class HFileInfo implements SortedMap { static final int MAX_MINOR_VERSION = 3; /** Last key in the file. Filled in when we read in the file info */ - private Cell lastKeyCell = null; + private ExtendedCell lastKeyCell = null; /** Average key length read from file info */ private int avgKeyLen = -1; /** Average value length read from file info */ @@ -512,7 +513,7 @@ public List getLoadOnOpenBlocks() { return loadOnOpenBlocks; } - public Cell getLastKeyCell() { + public ExtendedCell getLastKeyCell() { return lastKeyCell; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index c66a709fe494..64215333371f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -189,7 +190,7 @@ public long length() { * the first row key, but rather the byte form of the first KeyValue. */ @Override - public Optional getFirstKey() { + public Optional getFirstKey() { if (dataBlockIndexReader == null) { throw new BlockIndexNotLoadedException(path); } @@ -331,7 +332,7 @@ public static class HFileScannerImpl implements HFileScanner { * last data block. If the nextIndexedKey is null, it means the nextIndexedKey has not been * loaded yet. */ - protected Cell nextIndexedKey; + protected ExtendedCell nextIndexedKey; // Current block being used. NOTICE: DON't release curBlock separately except in shipped() or // close() methods. 
Because the shipped() or close() will do the release finally, even if any @@ -628,17 +629,17 @@ protected int blockSeek(Cell key, boolean seekBefore) { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return nextIndexedKey; } @Override - public int seekTo(Cell key) throws IOException { + public int seekTo(ExtendedCell key) throws IOException { return seekTo(key, true); } @Override - public int reseekTo(Cell key) throws IOException { + public int reseekTo(ExtendedCell key) throws IOException { int compared; if (isSeeked()) { compared = compareKey(reader.getComparator(), key); @@ -679,7 +680,7 @@ public int reseekTo(Cell key) throws IOException { * key, 1 if we are past the given key -2 if the key is earlier than the first key of * the file while using a faked index key */ - public int seekTo(Cell key, boolean rewind) throws IOException { + public int seekTo(ExtendedCell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, curBlock, cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); @@ -692,13 +693,13 @@ public int seekTo(Cell key, boolean rewind) throws IOException { } @Override - public boolean seekBefore(Cell key) throws IOException { + public boolean seekBefore(ExtendedCell key) throws IOException { HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, curBlock, cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), reader); if (seekToBlock == null) { return false; } - Cell firstKey = getFirstKeyCellInBlock(seekToBlock); + ExtendedCell firstKey = getFirstKeyCellInBlock(seekToBlock); if (PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), firstKey, key) >= 0) { long previousBlockOffset = seekToBlock.getPrevBlockOffset(); // The key we are interested in @@ -778,12 +779,12 @@ public DataBlockEncoding getEffectiveDataBlockEncoding() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { if (!isSeeked()) { return null; } - Cell ret; + ExtendedCell ret; int cellBufSize = getKVBufSize(); long seqId = 0L; if (this.reader.getHFileInfo().shouldIncludeMemStoreTS()) { @@ -823,7 +824,7 @@ public Cell getCell() { } @Override - public Cell getKey() { + public ExtendedCell getKey() { assertSeeked(); // Create a new object so that this getKey is cached as firstKey, lastKey ObjectIntPair keyPair = new ObjectIntPair<>(); @@ -973,8 +974,8 @@ protected void readAndUpdateNewBlock(long firstDataBlockOffset) throws IOExcepti updateCurrentBlock(newBlock); } - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, ExtendedCell nextIndexedKey, + boolean rewind, ExtendedCell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1032,7 +1033,7 @@ protected void updateCurrentBlock(HFileBlock newBlock) throws IOException { this.nextIndexedKey = null; } - protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { + protected ExtendedCell getFirstKeyCellInBlock(HFileBlock curBlock) { ByteBuff buffer = curBlock.getBufferWithoutHeader(); // It is safe to manipulate this buffer because we own the buffer object. 
buffer.rewind(); @@ -1047,7 +1048,7 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { } } - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv); @@ -1432,7 +1433,7 @@ private void validateBlockType(HFileBlock block, BlockType expectedBlockType) th * the last row key, but it is the Cell representation of the last key */ @Override - public Optional getLastKey() { + public Optional getLastKey() { return dataBlockIndexReader.isEmpty() ? Optional.empty() : Optional.of(fileInfo.getLastKeyCell()); @@ -1443,7 +1444,7 @@ public Optional getLastKey() { * approximation only. */ @Override - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { return Optional.ofNullable(dataBlockIndexReader.midkey(this)); } @@ -1552,7 +1553,7 @@ public boolean next() throws IOException { } @Override - public Cell getKey() { + public ExtendedCell getKey() { assertValidSeek(); return seeker.getKey(); } @@ -1564,7 +1565,7 @@ public ByteBuffer getValue() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { if (this.curBlock == null) { return null; } @@ -1578,13 +1579,13 @@ private void assertValidSeek() { } @Override - protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { + protected ExtendedCell getFirstKeyCellInBlock(HFileBlock curBlock) { return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock)); } @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, ExtendedCell nextIndexedKey, + boolean rewind, ExtendedCell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1595,7 +1596,7 @@ protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { return seeker.compareKey(comparator, key); } } @@ -1658,9 +1659,9 @@ public boolean prefetchStarted() { /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up - * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this - * overload of getScanner for compactions. See + * {@link HFileScanner#seekTo(ExtendedCell)} to position an start the read. There is nothing to + * clean up in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not + * use this overload of getScanner for compactions. See * {@link #getScanner(Configuration, boolean, boolean, boolean)} * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. @@ -1675,8 +1676,8 @@ public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final bo /** * Create a Scanner on this file. No seeks or reads are done on creation. 
Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up - * in a Scanner. Letting go of your references to the scanner is sufficient. + * {@link HFileScanner#seekTo(ExtendedCell)} to position an start the read. There is nothing to + * clean up in a Scanner. Letting go of your references to the scanner is sufficient. * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. * @param pread Use positional read rather than seek+read if true (pread is better for diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index b0022788c38f..c24b64f4f77d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.function.IntConsumer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.Shipper; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +30,7 @@ * reposition yourself as well. *
<p>
    * A scanner doesn't always have a key/value that it is pointing to when it is first created and - * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case, + * before {@link #seekTo()}/{@link #seekTo(ExtendedCell)} are called. In this case, * {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be * available. The general pattern is that you position the Scanner using the seekTo variants and * then getKey and getValue. @@ -48,7 +48,7 @@ public interface HFileScanner extends Shipper, Closeable { * will position itself at the end of the file and next() will return false when it is * called. */ - int seekTo(Cell cell) throws IOException; + int seekTo(ExtendedCell cell) throws IOException; /** * Reseek to or just before the passed cell. Similar to seekTo except that this can @@ -63,7 +63,7 @@ public interface HFileScanner extends Shipper, Closeable { * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in * position i; and 1, such that c[i] < cell, and scanner is left in position i. */ - int reseekTo(Cell cell) throws IOException; + int reseekTo(ExtendedCell cell) throws IOException; /** * Consider the cell stream of all the cells in the file, c[0] .. c[n], where there @@ -73,7 +73,7 @@ public interface HFileScanner extends Shipper, Closeable { * cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but * there may also NOT be a c[i+1], and next() will return false (EOF). */ - boolean seekBefore(Cell cell) throws IOException; + boolean seekBefore(ExtendedCell cell) throws IOException; /** * Positions this scanner at the start of the file. @@ -89,34 +89,35 @@ public interface HFileScanner extends Shipper, Closeable { boolean next() throws IOException; /** - * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this - * method. + * Gets the current key in the form of a cell. You must call {@link #seekTo(ExtendedCell)} before + * this method. * @return gets the current key as a Cell. */ - Cell getKey(); + ExtendedCell getKey(); /** - * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this - * method. + * Gets a buffer view to the current value. You must call {@link #seekTo(ExtendedCell)} before + * this method. * @return byte buffer for the value. The limit is set to the value size, and the position is 0, * the start of the buffer view. */ ByteBuffer getValue(); - /** Returns Instance of {@link org.apache.hadoop.hbase.Cell}. */ - Cell getCell(); + /** Returns Instance of {@link ExtendedCell}. */ + ExtendedCell getCell(); /** Returns Reader that underlies this Scanner instance. */ HFile.Reader getReader(); /** - * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)} - * or {@link #seekTo()} or {@link #seekTo(Cell)}. Otherwise returns false. + * @return True is scanner has had one of the seek calls invoked; i.e. + * {@link #seekBefore(ExtendedCell)} or {@link #seekTo()} or + * {@link #seekTo(ExtendedCell)}. Otherwise returns false. */ boolean isSeeked(); /** Returns the next key in the index (the key to seek to the next block) */ - Cell getNextIndexedKey(); + ExtendedCell getNextIndexedKey(); /** * Close this HFile scanner and do necessary cleanup. 
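Since the HFileScanner interface above is the seam most of this patch leans on, a rough usage sketch of the updated contract may help. The reader and configuration are assumed to come from elsewhere; this is a sketch, not code from the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileScanSketch {
  // Position with seekTo(), then iterate; getCell()/getKey() now yield ExtendedCell.
  static long countCells(HFile.Reader reader, Configuration conf) throws IOException {
    long count = 0;
    HFileScanner scanner = reader.getScanner(conf, /* cacheBlocks */ true, /* pread */ true);
    try {
      if (scanner.seekTo()) { // false means the file holds no cells
        do {
          ExtendedCell cell = scanner.getCell(); // typed as ExtendedCell after this patch
          count++;
        } while (scanner.next());
      }
    } finally {
      scanner.close();
    }
    return count;
  }
}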
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index d2dfaf62106a..0f54fafba954 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -75,7 +76,7 @@ public class HFileWriterImpl implements HFile.Writer { private final int encodedBlockSizeLimit; /** The Cell previously appended. Becomes the last cell in the file. */ - protected Cell lastCell = null; + protected ExtendedCell lastCell = null; /** FileSystem stream to write into. */ protected FSDataOutputStream outputStream; @@ -112,7 +113,7 @@ public class HFileWriterImpl implements HFile.Writer { /** * First cell in a block. This reference should be short-lived since we write hfiles in a burst. */ - protected Cell firstCellInBlock = null; + protected ExtendedCell firstCellInBlock = null; /** May be null if we were passed a stream. */ protected final Path path; @@ -163,7 +164,7 @@ public class HFileWriterImpl implements HFile.Writer { * The last(stop) Cell of the previous data block. This reference should be short-lived since we * write hfiles in a burst. */ - private Cell lastCellOfPreviousBlock = null; + private ExtendedCell lastCellOfPreviousBlock = null; /** Additional data items to be written to the "load-on-open" section. */ private List additionalLoadOnOpenData = new ArrayList<>(); @@ -360,7 +361,7 @@ private void finishBlock() throws IOException { lastDataBlockOffset = outputStream.getPos(); blockWriter.writeHeaderAndData(outputStream); int onDiskSize = blockWriter.getOnDiskSizeWithHeader(); - Cell indexEntry = + ExtendedCell indexEntry = getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock); dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); @@ -377,8 +378,8 @@ private void finishBlock() throws IOException { * cell. * @return A cell that sorts between left and right. */ - public static Cell getMidpoint(final CellComparator comparator, final Cell left, - final Cell right) { + public static ExtendedCell getMidpoint(final CellComparator comparator, final ExtendedCell left, + final ExtendedCell right) { if (right == null) { throw new IllegalArgumentException("right cell can not be null"); } @@ -733,7 +734,7 @@ public HFileContext getFileContext() { * construction. Cell to add. Cannot be empty nor null. */ @Override - public void append(final Cell cell) throws IOException { + public void append(final ExtendedCell cell) throws IOException { // checkKey uses comparator to check we are writing in order. 
boolean dupKey = checkKey(cell); if (!dupKey) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index d64f0e4ce53d..002b26295f33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodingState; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -47,7 +47,7 @@ private NoOpDataBlockEncoder() { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState(); NoneEncoder encoder = state.encoder; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java index 4162fca6afe5..0d9767f62210 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; @@ -138,12 +139,12 @@ protected static class NoOpEncodedSeeker implements EncodedSeeker { protected int midLeafBlockOnDiskSize = -1; protected int midKeyEntry = -1; - private Cell[] blockKeys; + private ExtendedCell[] blockKeys; private CellComparator comparator; protected int searchTreeLevel; /** Pre-computed mid-key */ - private AtomicReference midKey = new AtomicReference<>(); + private AtomicReference midKey = new AtomicReference<>(); @Override public long heapSize() { @@ -184,7 +185,7 @@ public boolean isEmpty() { } @Override - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return blockKeys[i]; } @@ -238,7 +239,7 @@ private void readRootIndex(DataInput in, final int numEntries) throws IOExceptio } private void initialize(int numEntries) { - blockKeys = new Cell[numEntries]; + blockKeys = new ExtendedCell[numEntries]; } private void add(final byte[] key, final long offset, final int dataSize) { @@ -250,10 +251,12 @@ private void add(final byte[] key, final long offset, final int dataSize) { } @Override - public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) throw new IOException("HFile empty"); + public ExtendedCell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException { + if (rootCount == 0) { + throw new IOException("HFile empty"); + } - Cell targetMidKey = this.midKey.get(); + ExtendedCell targetMidKey = this.midKey.get(); if (targetMidKey != null) { return targetMidKey; } @@ -285,7 +288,7 @@ 
public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOExcepti } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, HFile.CachingBlockReader cachingBlockReader) throws IOException { @@ -295,7 +298,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } // the next indexed key - Cell nextIndexedKey = null; + ExtendedCell nextIndexedKey = null; // Read the next-level (intermediate or leaf) index block. long currentOffset = blockOffsets[rootLevelIndex]; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index 44f77b62ad8b..5f0538eb7065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -34,8 +34,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -324,7 +324,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo()); // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME long currentTime = EnvironmentEdgeManager.currentTime(); long lastMillis = 0; @@ -354,7 +354,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long shippedCallSizeLimit = (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); - Cell mobCell = null; + ExtendedCell mobCell = null; List committedMobWriterFileNames = new ArrayList<>(); try { @@ -362,7 +362,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); do { - hasMore = scanner.next(cells, scannerContext); + hasMore = scanner.next((List) cells, scannerContext); currentTime = EnvironmentEdgeManager.currentTime(); if (LOG.isDebugEnabled()) { now = currentTime; @@ -371,7 +371,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel progress.cancel(); return false; } - for (Cell c : cells) { + for (ExtendedCell c : cells) { if (compactMOBs) { if (MobUtils.isMobReferenceCell(c)) { String fName = MobUtils.getMobFileName(c); @@ -525,7 +525,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel mobCells++; // append the original keyValue in the mob file. mobFileWriter.append(c); - Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); + ExtendedCell reference = + MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); // write the cell whose value is the path of a mob file to the store file. 
writer.append(reference); cellsCountCompactedToMob++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java index e7b0f8260822..f8a55abde115 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java @@ -27,7 +27,7 @@ import java.util.function.Consumer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -192,7 +192,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); boolean control = @@ -205,9 +205,9 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, mobRefSet.get().clear(); try { do { - hasMore = scanner.next(cells, scannerContext); + hasMore = scanner.next((List) cells, scannerContext); if (!cells.isEmpty()) { - for (Cell c : cells) { + for (ExtendedCell c : cells) { // If we know that this KV is going to be included always, then let us // set its memstoreTS to 0. This will help us save space when writing to // disk. @@ -223,7 +223,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, mobCount++; // append the tags to the KeyValue. 
// The key is same, the value is the filename of the mob file - Cell reference = + ExtendedCell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); writer.append(reference); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java index f55088ea6be5..fe66535ee55e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java @@ -20,6 +20,7 @@ import java.io.Closeable; import java.io.IOException; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.yetus.audience.InterfaceAudience; @@ -45,20 +46,20 @@ @InterfaceAudience.Private public class MobCell implements Closeable { - private final Cell cell; + private final ExtendedCell cell; private final StoreFileScanner sfScanner; - public MobCell(Cell cell) { + public MobCell(ExtendedCell cell) { this.cell = cell; this.sfScanner = null; } - public MobCell(Cell cell, StoreFileScanner sfScanner) { + public MobCell(ExtendedCell cell, StoreFileScanner sfScanner) { this.cell = cell; this.sfScanner = sfScanner; } - public Cell getCell() { + public ExtendedCell getCell() { return cell; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java index 3293208771ac..102617ae74df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; @@ -66,7 +66,7 @@ public StoreFileScanner getScanner() throws IOException { * @param cacheMobBlocks Should this scanner cache blocks. * @return The cell in the mob file. */ - public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException { + public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks) throws IOException { return readCell(search, cacheMobBlocks, sf.getMaxMemStoreTS()); } @@ -77,7 +77,8 @@ public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException * @param readPt the read point. * @return The cell in the mob file. 
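MobCell pairs the resolved cell with the StoreFileScanner backing it, which is why it is Closeable: the cell's buffers are only guaranteed valid while the MobCell is open. A hedged usage sketch of the readCell signature introduced here (the null handling follows the resolve() code shown further down; the rest of the setup is assumed):

    import java.io.IOException;
    import org.apache.hadoop.hbase.ExtendedCell;
    import org.apache.hadoop.hbase.mob.MobCell;
    import org.apache.hadoop.hbase.mob.MobFile;

    // Sketch only: assumes the caller already has an open MobFile and a search key.
    final class MobReadSketch {
      static int readValueLength(MobFile mobFile, ExtendedCell search) throws IOException {
        try (MobCell mobCell = mobFile.readCell(search, /* cacheMobBlocks= */ false)) {
          if (mobCell == null) {
            return -1;                           // key not found in this mob file
          }
          ExtendedCell cell = mobCell.getCell(); // valid only while mobCell stays open
          return cell.getValueLength();
        }
      }
    }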
*/ - public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException { + public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks, long readPt) + throws IOException { StoreFileScanner scanner = null; boolean succ = false; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index 60f0f126ab60..2fadc83340ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -497,7 +498,8 @@ public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableNam * snapshot. * @return The mob reference KeyValue. */ - public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) { + public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName, + Tag tableNameTag) { // Append the tags to the KeyValue. // The key is same, the value is the filename of the mob file List tags = new ArrayList<>(); @@ -512,7 +514,8 @@ public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag return createMobRefCell(cell, fileName, TagUtil.fromList(tags)); } - public static Cell createMobRefCell(Cell cell, byte[] fileName, byte[] refCellTags) { + public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName, + byte[] refCellTags) { byte[] refValue = Bytes.add(Bytes.toBytes(cell.getValueLength()), fileName); return PrivateCellUtil.createCell(cell, refValue, TagUtil.concatTags(refCellTags, cell)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java index b76680d0fdbe..24598c12bd1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; @@ -102,7 +103,7 @@ protected int doWork() throws Exception { out.println( String.format(KEY_TMPL, sequenceId, FORMATTER.format(Instant.ofEpochMilli(writeTime)))); for (Cell cell : edit.getCells()) { - Map op = WALPrettyPrinter.toStringMap(cell); + Map op = WALPrettyPrinter.toStringMap((ExtendedCell) cell); if ( !Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 9a88cab450af..9e15358c3673 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -22,7 +22,6 @@ import java.util.NavigableSet; import java.util.SortedSet; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; @@ -111,14 +110,14 @@ protected void resetTimeOfOldestEdit() { public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent); @Override - public void add(Iterable cells, MemStoreSizing memstoreSizing) { - for (Cell cell : cells) { + public void add(Iterable cells, MemStoreSizing memstoreSizing) { + for (ExtendedCell cell : cells) { add(cell, memstoreSizing); } } @Override - public void add(Cell cell, MemStoreSizing memstoreSizing) { + public void add(ExtendedCell cell, MemStoreSizing memstoreSizing) { doAddOrUpsert(cell, 0, memstoreSizing, true); } @@ -131,11 +130,11 @@ public void add(Cell cell, MemStoreSizing memstoreSizing) { * @param readpoint readpoint below which we can safely remove duplicate KVs * @param memstoreSizing object to accumulate changed size */ - private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) { + private void upsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing) { doAddOrUpsert(cell, readpoint, memstoreSizing, false); } - private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing, + private void doAddOrUpsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing, boolean doAdd) { MutableSegment currentActive; boolean succ = false; @@ -153,8 +152,9 @@ private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSiz } } - protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) { - Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false); + protected void doAdd(MutableSegment currentActive, ExtendedCell cell, + MemStoreSizing memstoreSizing) { + ExtendedCell toAdd = maybeCloneWithAllocator(currentActive, cell, false); boolean mslabUsed = (toAdd != cell); // This cell data is backed by the same byte[] where we read request in RPC(See // HBASE-15180). By default, MSLAB is ON and we might have copied cell to MSLAB area. If @@ -171,14 +171,14 @@ protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing mem internalAdd(currentActive, toAdd, mslabUsed, memstoreSizing); } - private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint, + private void doUpsert(MutableSegment currentActive, ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing) { // Add the Cell to the MemStore - // Use the internalAdd method here since we (a) already have a lock - // and (b) cannot safely use the MSLAB here without potentially - // hitting OOME - see TestMemStore.testUpsertMSLAB for a - // test that triggers the pathological case if we don't avoid MSLAB - // here. + // Use the internalAdd method here since we + // (a) already have a lock and + // (b) cannot safely use the MSLAB here without potentially hitting OOME + // - see TestMemStore.testUpsertMSLAB for a test that triggers the pathological case if we don't + // avoid MSLAB here. // This cell data is backed by the same byte[] where we read request in RPC(See // HBASE-15180). We must do below deep copy. Or else we will keep referring to the bigger // chunk of memory and prevent it from getting GCed. 
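The comment above carries the reasoning: an incoming cell can be a thin view over the whole RPC request buffer (HBASE-15180), so storing it as-is would pin that large byte[] in the memstore; with the parameter now typed as ExtendedCell, deepCopyIfNeeded can simply call deepClone(). A toy model of why the copy matters (ByteSliceCell is made up, not HBase code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // A "cell" that is just a view over a big request buffer: keeping the view alive keeps the
    // whole buffer alive; deep-cloning copies only the bytes the cell actually uses.
    final class ByteSliceCell {
      final byte[] backing;
      final int offset;
      final int length;

      ByteSliceCell(byte[] backing, int offset, int length) {
        this.backing = backing;
        this.offset = offset;
        this.length = length;
      }

      ByteSliceCell deepClone() {   // analogue of ExtendedCell.deepClone()
        return new ByteSliceCell(Arrays.copyOfRange(backing, offset, offset + length), 0, length);
      }

      public static void main(String[] args) {
        byte[] rpcBuffer = new byte[8 * 1024 * 1024];          // pretend: the whole RPC request
        ByteSliceCell incoming = new ByteSliceCell(rpcBuffer, 128, 64);
        List<ByteSliceCell> memstore = new ArrayList<>();
        memstore.add(incoming.deepClone());                     // retains 64 bytes, not 8 MB
        System.out.println("stored backing size = " + memstore.get(0).backing.length);
      }
    }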
@@ -195,7 +195,7 @@ private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint, * @param memstoreSizing object to accumulate region size changes * @return true iff can proceed with applying the update */ - protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell, + protected abstract boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing); /** @@ -204,16 +204,13 @@ protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell, */ protected abstract void postUpdate(MutableSegment currentActive); - private static Cell deepCopyIfNeeded(Cell cell) { - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).deepClone(); - } - return cell; + private static ExtendedCell deepCopyIfNeeded(ExtendedCell cell) { + return cell.deepClone(); } @Override - public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { - for (Cell cell : cells) { + public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { + for (ExtendedCell cell : cells) { upsert(cell, readpoint, memstoreSizing); } } @@ -281,10 +278,8 @@ protected void dump(Logger log) { snapshot.dump(log); } - /* - * @return Return lowest of a or b or null if both a and b are null - */ - protected Cell getLowest(final Cell a, final Cell b) { + /** Returns Return lowest of a or b or null if both a and b are null */ + protected ExtendedCell getLowest(final ExtendedCell a, final ExtendedCell b) { if (a == null) { return b; } @@ -294,17 +289,17 @@ protected Cell getLowest(final Cell a, final Cell b) { return comparator.compareRows(a, b) <= 0 ? a : b; } - /* + /** * @param key Find row that follows this one. If null, return first. * @param set Set to look in for a row beyond row. * @return Next row or null if none found. If one found, will be a new KeyValue -- can be - * destroyed by subsequent calls to this method. + * destroyed by subsequent calls to this method. */ - protected Cell getNextRow(final Cell key, final NavigableSet set) { - Cell result = null; - SortedSet tail = key == null ? set : set.tailSet(key); + protected ExtendedCell getNextRow(final ExtendedCell key, final NavigableSet set) { + ExtendedCell result = null; + SortedSet tail = key == null ? set : set.tailSet(key); // Iterate until we fall into the next row; i.e. move off current row - for (Cell cell : tail) { + for (ExtendedCell cell : tail) { if (comparator.compareRows(cell, key) <= 0) { continue; } @@ -326,20 +321,20 @@ protected Cell getNextRow(final Cell key, final NavigableSet set) { * @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap. * @return either the given cell or its clone */ - private Cell maybeCloneWithAllocator(MutableSegment currentActive, Cell cell, + private ExtendedCell maybeCloneWithAllocator(MutableSegment currentActive, ExtendedCell cell, boolean forceCloneOfBigCell) { return currentActive.maybeCloneWithAllocator(cell, forceCloneOfBigCell); } - /* + /** * Internal version of add() that doesn't clone Cells with the allocator, and doesn't take the * lock. 
Callers should ensure they already have the read lock taken - * @param toAdd the cell to add - * @param mslabUsed whether using MSLAB + * @param toAdd the cell to add + * @param mslabUsed whether using MSLAB * @param memstoreSizing object to accumulate changed size */ - private void internalAdd(MutableSegment currentActive, final Cell toAdd, final boolean mslabUsed, - MemStoreSizing memstoreSizing) { + private void internalAdd(MutableSegment currentActive, final ExtendedCell toAdd, + final boolean mslabUsed, MemStoreSizing memstoreSizing) { boolean sizeAddedPreOperation = sizeAddedPreOperation(); currentActive.add(toAdd, mslabUsed, memstoreSizing, sizeAddedPreOperation); setOldestEditTimeToNow(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java index f62b0d615149..618ae07a9a8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; @@ -86,15 +87,14 @@ protected boolean canBeFlattened() { // Create CellSet based on CellArrayMap from compacting iterator private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) { - boolean merge = (action == MemStoreCompactionStrategy.Action.MERGE || action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS); - Cell[] cells = new Cell[numOfCells]; // build the Cell Array + ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array int i = 0; int numUniqueKeys = 0; Cell prev = null; while (iterator.hasNext()) { - Cell c = iterator.next(); + ExtendedCell c = iterator.next(); // The scanner behind the iterator is doing all the elimination logic if (merge) { // if this is merge we just move the Cell object without copying MSLAB @@ -126,8 +126,8 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false); - this.setCellSet(null, new CellSet(cam, numUniqueKeys)); // update the CellSet of this Segment + CellArrayMap cam = new CellArrayMap<>(getComparator(), cells, 0, i, false); + this.setCellSet(null, new CellSet<>(cam, numUniqueKeys)); // update the CellSet of this Segment } /*------------------------------------------------------------------------*/ @@ -135,12 +135,12 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator // (without compacting iterator) // We do not consider cells bigger than chunks! 
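The segment above packs its cells into an ordered ExtendedCell[] and wraps it in a CellArrayMap; the generic type parameters do not survive in this text, but from the call sites the class is evidently generified over a cell type (something like T extends Cell). A simplified, stand-alone stand-in for that structure (ArraySliceIndex is an illustrative name, not HBase code):

    import java.util.Arrays;
    import java.util.Comparator;

    // Immutable, sorted array slice addressed by index; sub-views share the backing array and
    // only narrow the [min, max) boundaries, like CellArrayMap/CellFlatMap sub-maps.
    final class ArraySliceIndex<T> {
      private final T[] block;
      private final int min;   // first valid index, inclusive
      private final int max;   // last valid index, exclusive
      private final Comparator<? super T> comparator;

      ArraySliceIndex(Comparator<? super T> comparator, T[] block, int min, int max) {
        this.comparator = comparator;
        this.block = block;
        this.min = min;
        this.max = max;
      }

      T get(int i) {
        return (i < min || i >= max) ? null : block[i];
      }

      ArraySliceIndex<T> subSlice(int newMin, int newMax) {
        return new ArraySliceIndex<>(comparator, block, newMin, newMax);
      }

      int find(T needle) {
        return Arrays.binarySearch(block, min, max, needle, comparator);
      }

      public static void main(String[] args) {
        Integer[] data = { 1, 3, 5, 7, 9 };
        ArraySliceIndex<Integer> idx =
          new ArraySliceIndex<>(Comparator.naturalOrder(), data, 0, data.length);
        System.out.println(idx.find(7));               // 3
        System.out.println(idx.subSlice(1, 4).get(0)); // null: index 0 is outside the sub-slice
      }
    }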
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, - CellSet oldCellSet, MemStoreCompactionStrategy.Action action) { - Cell[] cells = new Cell[numOfCells]; // build the Cell Array - Cell curCell; + CellSet oldCellSet, MemStoreCompactionStrategy.Action action) { + ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array + ExtendedCell curCell; int idx = 0; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; try { while ((curCell = segmentScanner.next()) != null) { cells[idx++] = curCell; @@ -165,9 +165,9 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false); + CellArrayMap cam = new CellArrayMap<>(getComparator(), cells, 0, idx, false); // update the CellSet of this Segment - this.setCellSet(oldCellSet, new CellSet(cam, numUniqueKeys)); + this.setCellSet(oldCellSet, new CellSet<>(cam, numUniqueKeys)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java index af60c8e93cf6..1ebf693bda6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java @@ -26,15 +26,15 @@ * CellArrayMap's array of references pointing to Cell objects. */ @InterfaceAudience.Private -public class CellArrayMap extends CellFlatMap { +public class CellArrayMap extends CellFlatMap { - private final Cell[] block; + private final T[] block; /* * The Cells Array is created only when CellArrayMap is created, all sub-CellBlocks use boundary * indexes. The given Cell array must be ordered. 
*/ - public CellArrayMap(Comparator comparator, Cell[] b, int min, int max, + public CellArrayMap(Comparator comparator, T[] b, int min, int max, boolean descending) { super(comparator, min, max, descending); this.block = b; @@ -42,12 +42,12 @@ public CellArrayMap(Comparator comparator, Cell[] b, int min, int /* To be used by base class only to create a sub-CellFlatMap */ @Override - protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { - return new CellArrayMap(comparator(), this.block, min, max, descending); + protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { + return new CellArrayMap<>(comparator(), this.block, min, max, descending); } @Override - protected Cell getCell(int i) { + protected T getCell(int i) { if ((i < minCellIdx) || (i >= maxCellIdx)) return null; return block[i]; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java index de6377668f93..a623c823cb33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; @@ -135,19 +134,17 @@ protected boolean canBeFlattened() { // Create CellSet based on CellChunkMap from compacting iterator private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) { - int numOfCellsAfterCompaction = 0; int currentChunkIdx = 0; int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; Chunk[] chunks = allocIndexChunks(numOfCells); while (iterator.hasNext()) { // the iterator hides the elimination logic for compaction boolean alreadyCopied = false; - Cell c = iterator.next(); + ExtendedCell c = iterator.next(); numOfCellsAfterCompaction++; - assert (c instanceof ExtendedCell); - if (((ExtendedCell) c).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { + if (c.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { // CellChunkMap assumes all cells are allocated on MSLAB. // Therefore, cells which are not allocated on MSLAB initially, // are copied into MSLAB here. 
@@ -190,9 +187,9 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellChunkMap ccm = - new CellChunkMap(getComparator(), chunks, 0, numOfCellsAfterCompaction, false); - this.setCellSet(null, new CellSet(ccm, numUniqueKeys)); // update the CellSet of this Segment + CellChunkMap ccm = + new CellChunkMap<>(getComparator(), chunks, 0, numOfCellsAfterCompaction, false); + this.setCellSet(null, new CellSet<>(ccm, numUniqueKeys)); // update the CellSet of this Segment } /*------------------------------------------------------------------------*/ @@ -200,19 +197,19 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator // (without compacting iterator) // This is a service for not-flat immutable segments private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, - CellSet oldCellSet, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) { - Cell curCell; + CellSet oldCellSet, MemStoreSizing memstoreSizing, + MemStoreCompactionStrategy.Action action) { + ExtendedCell curCell; Chunk[] chunks = allocIndexChunks(numOfCells); int currentChunkIdx = 0; int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; try { while ((curCell = segmentScanner.next()) != null) { - assert (curCell instanceof ExtendedCell); - if (((ExtendedCell) curCell).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { + if (curCell.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { // CellChunkMap assumes all cells are allocated on MSLAB. // Therefore, cells which are not allocated on MSLAB initially, // are copied into MSLAB here. @@ -246,9 +243,10 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, segmentScanner.close(); } - CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCells, false); + CellChunkMap ccm = + new CellChunkMap<>(getComparator(), chunks, 0, numOfCells, false); // update the CellSet of this Segment - this.setCellSet(oldCellSet, new CellSet(ccm, numUniqueKeys)); + this.setCellSet(oldCellSet, new CellSet<>(ccm, numUniqueKeys)); } /*------------------------------------------------------------------------*/ @@ -317,7 +315,7 @@ private Chunk[] allocIndexChunks(int numOfCells) { return chunks; } - private Cell copyCellIntoMSLAB(Cell cell, MemStoreSizing memstoreSizing) { + private ExtendedCell copyCellIntoMSLAB(ExtendedCell cell, MemStoreSizing memstoreSizing) { // Take care for a special case when a cell is copied from on-heap to (probably off-heap) MSLAB. // The cell allocated as an on-heap JVM object (byte array) occupies slightly different // amount of memory, than when the cell serialized and allocated on the MSLAB. 
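CellChunkMap can only index cells that live in MSLAB chunks, so the flattening code above checks getChunkId() and copies heap-only cells into the MSLAB first; the trailing comment notes the copy can change the cell's accounted size. A condensed sketch of that guard (MslabCopier stands in for the segment's private copyCellIntoMSLAB):

    import org.apache.hadoop.hbase.ExtendedCell;

    // Guard used while flattening a segment into a chunk-indexed map.
    final class ChunkBackedGuard {
      interface MslabCopier {
        ExtendedCell copyIntoMslab(ExtendedCell cell);
      }

      static ExtendedCell ensureChunkBacked(ExtendedCell cell, MslabCopier copier) {
        if (cell.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
          // Heap-only cell: it cannot be referenced by chunk id + offset, so copy it first.
          return copier.copyIntoMslab(cell);
        }
        return cell;   // already chunk-backed, index it in place
      }
    }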
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java index e4bfcf05ab2d..f6dad226cce0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java @@ -51,7 +51,7 @@ * */ @InterfaceAudience.Private -public class CellChunkMap extends CellFlatMap { +public class CellChunkMap extends CellFlatMap { private final Chunk[] chunks; // the array of chunks, on which the index is based @@ -69,7 +69,7 @@ public class CellChunkMap extends CellFlatMap { * @param max number of Cells or the index of the cell after the maximal cell * @param descending the order of the given array */ - public CellChunkMap(Comparator comparator, Chunk[] chunks, int min, int max, + public CellChunkMap(Comparator comparator, Chunk[] chunks, int min, int max, boolean descending) { super(comparator, min, max, descending); this.chunks = chunks; @@ -86,12 +86,12 @@ public CellChunkMap(Comparator comparator, Chunk[] chunks, int min * create only CellChunkMap from CellChunkMap */ @Override - protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { - return new CellChunkMap(this.comparator(), this.chunks, min, max, descending); + protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { + return new CellChunkMap<>(this.comparator(), this.chunks, min, max, descending); } @Override - protected Cell getCell(int i) { + protected T getCell(int i) { // get the index of the relevant chunk inside chunk array int chunkIndex = (i / numOfCellRepsInChunk); ByteBuffer block = chunks[chunkIndex].getData();// get the ByteBuffer of the relevant chunk @@ -127,6 +127,9 @@ protected Cell getCell(int i) { + ". We were looking for a cell at index " + i); } - return new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID); + @SuppressWarnings("unchecked") + T cell = (T) new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID); + + return cell; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java index 8a64d80c15ed..0c95f7ddb4ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java @@ -26,8 +26,6 @@ import java.util.Set; import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * CellFlatMap stores a constant number of elements and is immutable after creation stage. Being @@ -38,15 +36,15 @@ * sequential array and thus requires less memory than ConcurrentSkipListMap. 
*/ @InterfaceAudience.Private -public abstract class CellFlatMap implements NavigableMap { - private static final Logger LOG = LoggerFactory.getLogger(CellFlatMap.class); - private final Comparator comparator; +public abstract class CellFlatMap implements NavigableMap { + + private final Comparator comparator; protected int minCellIdx = 0; // the index of the minimal cell (for sub-sets) protected int maxCellIdx = 0; // the index of the cell after the maximal cell (for sub-sets) private boolean descending = false; /* C-tor */ - public CellFlatMap(Comparator comparator, int min, int max, boolean d) { + public CellFlatMap(Comparator comparator, int min, int max, boolean d) { this.comparator = comparator; this.minCellIdx = min; this.maxCellIdx = max; @@ -54,10 +52,10 @@ public CellFlatMap(Comparator comparator, int min, int max, boolea } /* Used for abstract CellFlatMap creation, implemented by derived class */ - protected abstract CellFlatMap createSubCellFlatMap(int min, int max, boolean descending); + protected abstract CellFlatMap createSubCellFlatMap(int min, int max, boolean descending); /* Returns the i-th cell in the cell block */ - protected abstract Cell getCell(int i); + protected abstract T getCell(int i); /** * Binary search for a given key in between given boundaries of the array. Positive returned @@ -67,13 +65,13 @@ public CellFlatMap(Comparator comparator, int min, int max, boolea * @param needle The key to look for in all of the entries * @return Same return value as Arrays.binarySearch. */ - private int find(Cell needle) { + private int find(T needle) { int begin = minCellIdx; int end = maxCellIdx - 1; while (begin <= end) { int mid = begin + ((end - begin) >> 1); - Cell midCell = getCell(mid); + T midCell = getCell(mid); int compareRes = comparator.compare(midCell, needle); if (compareRes == 0) { @@ -98,7 +96,7 @@ private int find(Cell needle) { * the given key exists in the set or not. taking into consideration whether the key should be * inclusive or exclusive. */ - private int getValidIndex(Cell key, boolean inclusive, boolean tail) { + private int getValidIndex(T key, boolean inclusive, boolean tail) { final int index = find(key); // get the valid (positive) insertion point from the output of the find() method int insertionPoint = index < 0 ? 
~index : index; @@ -125,7 +123,7 @@ private int getValidIndex(Cell key, boolean inclusive, boolean tail) { } @Override - public Comparator comparator() { + public Comparator comparator() { return comparator; } @@ -141,8 +139,7 @@ public boolean isEmpty() { // ---------------- Sub-Maps ---------------- @Override - public NavigableMap subMap(Cell fromKey, boolean fromInclusive, Cell toKey, - boolean toInclusive) { + public NavigableMap subMap(T fromKey, boolean fromInclusive, T toKey, boolean toInclusive) { final int lessCellIndex = getValidIndex(fromKey, fromInclusive, true); final int greaterCellIndex = getValidIndex(toKey, toInclusive, false); if (descending) { @@ -153,7 +150,7 @@ public NavigableMap subMap(Cell fromKey, boolean fromInclusive, Cell } @Override - public NavigableMap headMap(Cell toKey, boolean inclusive) { + public NavigableMap headMap(T toKey, boolean inclusive) { if (descending) { return createSubCellFlatMap(getValidIndex(toKey, inclusive, false), maxCellIdx, descending); } else { @@ -162,7 +159,7 @@ public NavigableMap headMap(Cell toKey, boolean inclusive) { } @Override - public NavigableMap tailMap(Cell fromKey, boolean inclusive) { + public NavigableMap tailMap(T fromKey, boolean inclusive) { if (descending) { return createSubCellFlatMap(minCellIdx, getValidIndex(fromKey, inclusive, true), descending); } else { @@ -171,28 +168,28 @@ public NavigableMap tailMap(Cell fromKey, boolean inclusive) { } @Override - public NavigableMap descendingMap() { + public NavigableMap descendingMap() { return createSubCellFlatMap(minCellIdx, maxCellIdx, true); } @Override - public NavigableMap subMap(Cell k1, Cell k2) { + public NavigableMap subMap(T k1, T k2) { return this.subMap(k1, true, k2, true); } @Override - public NavigableMap headMap(Cell k) { + public NavigableMap headMap(T k) { return this.headMap(k, true); } @Override - public NavigableMap tailMap(Cell k) { + public NavigableMap tailMap(T k) { return this.tailMap(k, true); } // -------------------------------- Key's getters -------------------------------- @Override - public Cell firstKey() { + public T firstKey() { if (isEmpty()) { return null; } @@ -200,7 +197,7 @@ public Cell firstKey() { } @Override - public Cell lastKey() { + public T lastKey() { if (isEmpty()) { return null; } @@ -208,7 +205,7 @@ public Cell lastKey() { } @Override - public Cell lowerKey(Cell k) { + public T lowerKey(T k) { if (isEmpty()) { return null; } @@ -219,7 +216,7 @@ public Cell lowerKey(Cell k) { } @Override - public Cell floorKey(Cell k) { + public T floorKey(T k) { if (isEmpty()) { return null; } @@ -229,7 +226,7 @@ public Cell floorKey(Cell k) { } @Override - public Cell ceilingKey(Cell k) { + public T ceilingKey(T k) { if (isEmpty()) { return null; } @@ -239,7 +236,7 @@ public Cell ceilingKey(Cell k) { } @Override - public Cell higherKey(Cell k) { + public T higherKey(T k) { if (isEmpty()) { return null; } @@ -250,7 +247,7 @@ public Cell higherKey(Cell k) { @Override public boolean containsKey(Object o) { - int index = find((Cell) o); + int index = find((T) o); return (index >= 0); } @@ -260,99 +257,99 @@ public boolean containsValue(Object o) { // use containsKey(Object o) instead } @Override - public Cell get(Object o) { - int index = find((Cell) o); + public T get(Object o) { + int index = find((T) o); return (index >= 0) ? 
getCell(index) : null; } // -------------------------------- Entry's getters -------------------------------- - private static class CellFlatMapEntry implements Entry { - private final Cell cell; + private static class CellFlatMapEntry implements Entry { + private final T cell; - public CellFlatMapEntry(Cell cell) { + public CellFlatMapEntry(T cell) { this.cell = cell; } @Override - public Cell getKey() { + public T getKey() { return cell; } @Override - public Cell getValue() { + public T getValue() { return cell; } @Override - public Cell setValue(Cell value) { + public T setValue(T value) { throw new UnsupportedOperationException(); } } @Override - public Entry lowerEntry(Cell k) { - Cell cell = lowerKey(k); + public Entry lowerEntry(T k) { + T cell = lowerKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry higherEntry(Cell k) { - Cell cell = higherKey(k); + public Entry higherEntry(T k) { + T cell = higherKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry ceilingEntry(Cell k) { - Cell cell = ceilingKey(k); + public Entry ceilingEntry(T k) { + T cell = ceilingKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry floorEntry(Cell k) { - Cell cell = floorKey(k); + public Entry floorEntry(T k) { + T cell = floorKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry firstEntry() { - Cell cell = firstKey(); + public Entry firstEntry() { + T cell = firstKey(); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry lastEntry() { - Cell cell = lastKey(); + public Entry lastEntry() { + T cell = lastKey(); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } // The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are // updating methods. @Override - public Entry pollFirstEntry() { + public Entry pollFirstEntry() { throw new UnsupportedOperationException(); } @Override - public Entry pollLastEntry() { + public Entry pollLastEntry() { throw new UnsupportedOperationException(); } @@ -362,7 +359,7 @@ public Entry pollLastEntry() { // fill up with Cells and provided in construction time. // Later the structure is immutable. 
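CellFlatMap is a NavigableMap in which every cell serves as both its own key and its own value, so the entry type above simply wraps one element and rejects setValue. A generic sketch of that self-entry idiom (SelfEntry is an illustrative name):

    import java.util.Map;

    // "Element is both key and value" entry, as used by the read-only flat map.
    final class SelfEntry<T> implements Map.Entry<T, T> {
      private final T element;

      SelfEntry(T element) {
        this.element = element;
      }

      @Override
      public T getKey() {
        return element;
      }

      @Override
      public T getValue() {
        return element;
      }

      @Override
      public T setValue(T value) {
        throw new UnsupportedOperationException("entries of an immutable map are read-only");
      }
    }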
@Override - public Cell put(Cell k, Cell v) { + public T put(T k, T v) { throw new UnsupportedOperationException(); } @@ -372,43 +369,43 @@ public void clear() { } @Override - public Cell remove(Object o) { + public T remove(Object o) { throw new UnsupportedOperationException(); } @Override - public void putAll(Map map) { + public void putAll(Map map) { throw new UnsupportedOperationException(); } // -------------------------------- Sub-Sets -------------------------------- @Override - public NavigableSet navigableKeySet() { + public NavigableSet navigableKeySet() { throw new UnsupportedOperationException(); } @Override - public NavigableSet descendingKeySet() { + public NavigableSet descendingKeySet() { throw new UnsupportedOperationException(); } @Override - public NavigableSet keySet() { + public NavigableSet keySet() { throw new UnsupportedOperationException(); } @Override - public Collection values() { + public Collection values() { return new CellFlatMapCollection(); } @Override - public Set> entrySet() { + public Set> entrySet() { throw new UnsupportedOperationException(); } // -------------------------------- Iterator K -------------------------------- - private final class CellFlatMapIterator implements Iterator { + private final class CellFlatMapIterator implements Iterator { int index; private CellFlatMapIterator() { @@ -421,8 +418,8 @@ public boolean hasNext() { } @Override - public Cell next() { - Cell result = getCell(index); + public T next() { + T result = getCell(index); if (descending) { index--; } else { @@ -438,7 +435,7 @@ public void remove() { } // -------------------------------- Collection -------------------------------- - private final class CellFlatMapCollection implements Collection { + private final class CellFlatMapCollection implements Collection { @Override public int size() { @@ -461,7 +458,7 @@ public boolean contains(Object o) { } @Override - public Iterator iterator() { + public Iterator iterator() { return new CellFlatMapIterator(); } @@ -476,7 +473,7 @@ public T[] toArray(T[] ts) { } @Override - public boolean add(Cell k) { + public boolean add(T k) { throw new UnsupportedOperationException(); } @@ -491,7 +488,7 @@ public boolean containsAll(Collection collection) { } @Override - public boolean addAll(Collection collection) { + public boolean addAll(Collection collection) { throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java index 4890c8a84494..c8d9b5b2ea67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java @@ -36,7 +36,7 @@ * and set and won't throw ConcurrentModificationException when iterating. */ @InterfaceAudience.Private -public class CellSet implements NavigableSet { +public class CellSet implements NavigableSet { public static final int UNKNOWN_NUM_UNIQUES = -1; // Implemented on top of a {@link java.util.concurrent.ConcurrentSkipListMap} @@ -44,127 +44,127 @@ public class CellSet implements NavigableSet { // is not already present.", this implementation "Adds the specified element to this set EVEN // if it is already present overwriting what was there previous". 
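The comment above is the behavioural point worth remembering: unlike ConcurrentSkipListSet.add, CellSet.add replaces an element that is already present, because it is implemented as delegatee.put(e, e) (see the add override further down). A small JDK-only demonstration of the difference, using entries that compare by key only (all names here are made up):

    import java.util.Comparator;
    import java.util.concurrent.ConcurrentSkipListMap;
    import java.util.concurrent.ConcurrentSkipListSet;

    public final class OverwriteOnAddDemo {
      private static final class Entry {
        final String key;
        final String payload;

        Entry(String key, String payload) {
          this.key = key;
          this.payload = payload;
        }
      }

      public static void main(String[] args) {
        Comparator<Entry> byKey = Comparator.comparing((Entry e) -> e.key);

        ConcurrentSkipListSet<Entry> set = new ConcurrentSkipListSet<>(byKey);
        set.add(new Entry("row1", "old"));
        set.add(new Entry("row1", "new"));       // no-op: an equal element is already present
        System.out.println(set.first().payload); // old

        ConcurrentSkipListMap<Entry, Entry> map = new ConcurrentSkipListMap<>(byKey);
        Entry first = new Entry("row1", "old");
        Entry second = new Entry("row1", "new");
        map.put(first, first);
        boolean wasAbsent = map.put(second, second) == null;  // what CellSet.add returns
        System.out.println(map.firstEntry().getValue().payload + " wasAbsent=" + wasAbsent);
        // prints: new wasAbsent=false
      }
    }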
// Otherwise, has same attributes as ConcurrentSkipListSet - private final NavigableMap delegatee; /// + private final NavigableMap delegatee; /// private final int numUniqueKeys; - public CellSet(final CellComparator c) { + public CellSet(CellComparator c) { this.delegatee = new ConcurrentSkipListMap<>(c.getSimpleComparator()); this.numUniqueKeys = UNKNOWN_NUM_UNIQUES; } - CellSet(final NavigableMap m, int numUniqueKeys) { + CellSet(final NavigableMap m, int numUniqueKeys) { this.delegatee = m; this.numUniqueKeys = numUniqueKeys; } - CellSet(final NavigableMap m) { + CellSet(final NavigableMap m) { this.delegatee = m; this.numUniqueKeys = UNKNOWN_NUM_UNIQUES; } - NavigableMap getDelegatee() { + NavigableMap getDelegatee() { return delegatee; } @Override - public Cell ceiling(Cell e) { + public T ceiling(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Iterator descendingIterator() { + public Iterator descendingIterator() { return this.delegatee.descendingMap().values().iterator(); } @Override - public NavigableSet descendingSet() { + public NavigableSet descendingSet() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell floor(Cell e) { + public T floor(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet headSet(final Cell toElement) { + public SortedSet headSet(final T toElement) { return headSet(toElement, false); } @Override - public NavigableSet headSet(final Cell toElement, boolean inclusive) { - return new CellSet(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES); + public NavigableSet headSet(final T toElement, boolean inclusive) { + return new CellSet<>(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES); } @Override - public Cell higher(Cell e) { + public T higher(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Iterator iterator() { + public Iterator iterator() { return this.delegatee.values().iterator(); } @Override - public Cell lower(Cell e) { + public T lower(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell pollFirst() { + public T pollFirst() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell pollLast() { + public T pollLast() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet subSet(Cell fromElement, Cell toElement) { + public SortedSet subSet(T fromElement, T toElement) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, + public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, boolean toInclusive) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet tailSet(Cell fromElement) { + public SortedSet tailSet(T fromElement) { return tailSet(fromElement, true); } @Override - public NavigableSet tailSet(Cell fromElement, boolean inclusive) { - return new CellSet(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES); + public NavigableSet tailSet(T fromElement, boolean inclusive) { + return new CellSet<>(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES); } @Override - public Comparator comparator() { + public Comparator comparator() { throw new 
UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell first() { + public T first() { return this.delegatee.firstEntry().getValue(); } @Override - public Cell last() { + public T last() { return this.delegatee.lastEntry().getValue(); } @Override - public boolean add(Cell e) { + public boolean add(T e) { return this.delegatee.put(e, e) == null; } @Override - public boolean addAll(Collection c) { + public boolean addAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java index 1d838d86abcf..f3f260f8cf7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.yetus.audience.InterfaceAudience; @@ -34,14 +34,14 @@ public interface CellSink { * Append the given cell * @param cell the cell to be added */ - void append(Cell cell) throws IOException; + void append(ExtendedCell cell) throws IOException; /** * Append the given (possibly partial) list of cells of a row * @param cellList the cell list to be added */ - default void appendAll(List cellList) throws IOException { - for (Cell cell : cellList) { + default void appendAll(List cellList) throws IOException { + for (ExtendedCell cell : cellList) { append(cell); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 1a2cbc6bdabf..568a7b061021 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; @@ -306,7 +307,7 @@ public void stopReplayingFromWAL() { * @return true iff can proceed with applying the update */ @Override - protected boolean preUpdate(MutableSegment currentActive, Cell cell, + protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing) { if (currentActive.sharedLock()) { if (checkAndAddToActiveSize(currentActive, cell, memstoreSizing)) { @@ -621,8 +622,8 @@ boolean isMemStoreFlushingInMemory() { * @param cell Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. 
*/ - Cell getNextRow(final Cell cell) { - Cell lowest = null; + ExtendedCell getNextRow(final ExtendedCell cell) { + ExtendedCell lowest = null; List segments = getSegments(); for (Segment segment : segments) { if (lowest == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index f955eb5d5825..af09e462140c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -23,6 +23,7 @@ import java.util.SortedSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -93,7 +94,7 @@ public void close() { * @return either the given cell or its clone */ @Override - public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) { + public ExtendedCell maybeCloneWithAllocator(ExtendedCell cell, boolean forceCloneOfBigCell) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -192,17 +193,17 @@ public TimeRangeTracker getTimeRangeTracker() { // *** Methods for SegmentsScanner @Override - public Cell last() { + public ExtendedCell last() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - public Iterator iterator() { + public Iterator iterator() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - public SortedSet headSet(Cell firstKeyOnRow) { + public SortedSet headSet(ExtendedCell firstKeyOnRow) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -218,18 +219,18 @@ public int compareRows(Cell left, Cell right) { /** Returns a set of all cells in the segment */ @Override - protected CellSet getCellSet() { + protected CellSet getCellSet() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, + protected void internalAdd(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -240,7 +241,7 @@ protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, * @return a subset of the segment cell set, which starts with the given cell */ @Override - protected SortedSet tailSet(Cell firstCell) { + protected SortedSet tailSet(ExtendedCell firstCell) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java index e5ee8041c350..b800178e8a28 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +54,7 @@ public DateTieredMultiFileWriter(List lowerBoundaries, } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { Map.Entry entry = lowerBoundary2Writer.floorEntry(cell.getTimestamp()); StoreFileWriter writer = entry.getValue(); if (writer == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 420dad51e377..433105e998f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.InnerStoreCellComparator; @@ -155,7 +156,7 @@ protected List getSegments() throws IOException { * @param cell Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. */ - Cell getNextRow(final Cell cell) { + ExtendedCell getNextRow(final ExtendedCell cell) { return getLowest(getNextRow(cell, this.getActive().getCellSet()), getNextRow(cell, this.snapshot.getCellSet())); } @@ -165,7 +166,7 @@ public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) { } @Override - protected boolean preUpdate(MutableSegment currentActive, Cell cell, + protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 6a07a6c9a088..19937ecdf00f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -33,10 +33,10 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -300,7 +300,7 @@ private void validateMobFile(Path path) throws IOException { * @param cacheBlocks Whether the scanner should cache blocks. * @return The cell found in the mob file. 
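DateTieredMultiFileWriter.append above routes each cell by timestamp: floorEntry on the lower-boundary map picks the tier whose lower boundary is the greatest one not exceeding the cell's timestamp. A stand-alone illustration of that routing (the boundary values and writer names are made up):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public final class DateTieredRoutingSketch {
      public static void main(String[] args) {
        // key = a tier's lower time boundary, value = the writer handling that tier
        NavigableMap<Long, String> lowerBoundaryToWriter = new TreeMap<>();
        lowerBoundaryToWriter.put(Long.MIN_VALUE, "oldest-tier");
        lowerBoundaryToWriter.put(1_686_000_000_000L, "tier-1");
        lowerBoundaryToWriter.put(1_686_500_000_000L, "tier-2");

        long cellTimestamp = 1_686_300_000_000L;
        String writer = lowerBoundaryToWriter.floorEntry(cellTimestamp).getValue();
        System.out.println("cell goes to " + writer);   // tier-1
      }
    }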
*/ - public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException { + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks) throws IOException { return resolve(reference, cacheBlocks, -1, true); } @@ -313,8 +313,8 @@ public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException { * resolved. * @return The cell found in the mob file. */ - public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyValueOnMobCellMiss) - throws IOException { + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks, + boolean readEmptyValueOnMobCellMiss) throws IOException { return resolve(reference, cacheBlocks, -1, readEmptyValueOnMobCellMiss); } @@ -328,7 +328,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyVal * corrupt. * @return The cell found in the mob file. */ - public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt, + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { MobCell mobCell = null; if (MobUtils.hasValidMobRefCellValue(reference)) { @@ -343,7 +343,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt, if (mobCell == null) { LOG.warn("The Cell result is null, assemble a new Cell with the same row,family," + "qualifier,timestamp,type and tags but with an empty value to return."); - Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength()) .setFamily(reference.getFamilyArray(), reference.getFamilyOffset(), reference.getFamilyLength()) @@ -397,7 +397,7 @@ public List getLocations(TableName tableName) throws IOException { * corrupt. * @return The found cell. Null if there's no such a cell. */ - private MobCell readCell(List locations, String fileName, Cell search, + private MobCell readCell(List locations, String fileName, ExtendedCell search, boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { FileSystem fs = getFileSystem(); IOException ioe = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index fdc50bc69476..2792ab2754cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -3243,18 +3244,18 @@ public void delete(Delete delete) throws IOException { *

    * Caller should have the row and region locks. */ - private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { - for (Map.Entry> e : familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { byte[] family = e.getKey(); - List cells = e.getValue(); + List cells = e.getValue(); assert cells instanceof RandomAccess; Map kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR); int listSize = cells.size(); for (int i = 0; i < listSize; i++) { - Cell cell = cells.get(i); + ExtendedCell cell = cells.get(i); // Check if time is LATEST, change to time of most recent addition if so // This is expensive. if ( @@ -3340,7 +3341,7 @@ private abstract static class BatchOperation { protected final OperationStatus[] retCodeDetails; protected final WALEdit[] walEditsFromCoprocessors; // reference family cell maps directly so coprocessors can mutate them if desired - protected final Map>[] familyCellMaps; + protected final Map>[] familyCellMaps; // For Increment/Append operations protected final Result[] results; @@ -3509,7 +3510,9 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep if (mutation instanceof Put || mutation instanceof Delete) { // store the family map reference to allow for mutations - familyCellMaps[index] = mutation.getFamilyCellMap(); + // we know that in mutation, only ExtendedCells are allow so here we do a fake cast, to + // simplify later logic + familyCellMaps[index] = (Map) mutation.getFamilyCellMap(); } // store durability for the batch (highest durability of all operations in the batch) @@ -3715,19 +3718,19 @@ public boolean visit(int index) throws IOException { protected void addNonSkipWALMutationsToWALEdit( final MiniBatchOperationInProgress miniBatchOp, WALEdit walEdit, - List cellsFromCP, Map> familyCellMap) { + List cellsFromCP, Map> familyCellMap) { doAddCellsToWALEdit(walEdit, cellsFromCP, familyCellMap); } protected static void doAddCellsToWALEdit(WALEdit walEdit, List cellsFromCP, - Map> familyCellMap) { + Map> familyCellMap) { walEdit.add(cellsFromCP); - walEdit.add(familyCellMap); + walEdit.add((Map) familyCellMap); } protected abstract void cacheSkipWALMutationForRegionReplication( final MiniBatchOperationInProgress miniBatchOp, - List> walEdits, Map> familyCellMap); + List> walEdits, Map> familyCellMap); /** * This method completes mini-batch operations by calling postBatchMutate() CP hook (if @@ -3782,11 +3785,11 @@ public void doPostOpCleanupForMiniBatch( * also does not check the families for validity. * @param familyMap Map of Cells by family */ - protected void applyFamilyMapToMemStore(Map> familyMap, + protected void applyFamilyMapToMemStore(Map> familyMap, MemStoreSizing memstoreAccounting) { - for (Map.Entry> e : familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { byte[] family = e.getKey(); - List cells = e.getValue(); + List cells = e.getValue(); assert cells instanceof RandomAccess; region.applyToMemStore(region.getStore(family), cells, false, memstoreAccounting); } @@ -3965,7 +3968,7 @@ public void prepareMiniBatchOperations(MiniBatchOperationInProgress mi return true; } - List results = returnResults ? new ArrayList<>(mutation.size()) : null; + List results = returnResults ? new ArrayList<>(mutation.size()) : null; familyCellMaps[index] = reckonDeltas(mutation, results, timestamp); this.results[index] = results != null ? 
Result.create(results) : Result.EMPTY_RESULT; @@ -4055,19 +4058,19 @@ private static Get toGet(final Mutation mutation) throws IOException { return get; } - private Map> reckonDeltas(Mutation mutation, List results, long now) - throws IOException { + private Map> reckonDeltas(Mutation mutation, + List results, long now) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; - Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); + Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Process a Store/family at a time. for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { final byte[] columnFamilyName = entry.getKey(); - List deltas = entry.getValue(); + List deltas = (List) entry.getValue(); // Reckon for the Store what to apply to WAL and MemStore. - List toApply = + List toApply = reckonDeltasByStore(region.stores.get(columnFamilyName), mutation, now, deltas, results); if (!toApply.isEmpty()) { - for (Cell cell : toApply) { + for (ExtendedCell cell : toApply) { HStore store = region.getStore(cell); if (store == null) { region.checkFamily(CellUtil.cloneFamily(cell)); @@ -4092,11 +4095,11 @@ private Map> reckonDeltas(Mutation mutation, List resul * @return Resulting Cells after deltas have been applied to current values. Side * effect is our filling out of the results List. */ - private List reckonDeltasByStore(HStore store, Mutation mutation, long now, - List deltas, List results) throws IOException { + private List reckonDeltasByStore(HStore store, Mutation mutation, long now, + List deltas, List results) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; byte[] columnFamily = store.getColumnFamilyDescriptor().getName(); - List> cellPairs = new ArrayList<>(deltas.size()); + List> cellPairs = new ArrayList<>(deltas.size()); // Sort the cells so that they match the order that they appear in the Get results. // Otherwise, we won't be able to find the existing values if the cells are not specified @@ -4105,7 +4108,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now // Get previous values for all columns in this family. Get get = new Get(mutation.getRow()); - for (Cell cell : deltas) { + for (ExtendedCell cell : deltas) { get.addColumn(columnFamily, CellUtil.cloneQualifier(cell)); } TimeRange tr; @@ -4122,14 +4125,14 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now try (RegionScanner scanner = region.getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 - List currentValues = new ArrayList<>(); - scanner.next(currentValues); + List currentValues = new ArrayList<>(); + scanner.next((List) currentValues); // Iterate the input columns and update existing values if they were found, otherwise // add new column initialized to the delta amount int currentValuesIndex = 0; for (int i = 0; i < deltas.size(); i++) { - Cell delta = deltas.get(i); - Cell currentValue = null; + ExtendedCell delta = deltas.get(i); + ExtendedCell currentValue = null; if ( currentValuesIndex < currentValues.size() && CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta) @@ -4140,7 +4143,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now } } // Switch on whether this an increment or an append building the new Cell to apply. 
- Cell newCell; + ExtendedCell newCell; if (mutation instanceof Increment) { long deltaAmount = getLongValue(delta); final long newValue = @@ -4174,14 +4177,14 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now if (region.coprocessorHost != null) { // Here the operation must be increment or append. cellPairs = mutation instanceof Increment - ? region.coprocessorHost.postIncrementBeforeWAL(mutation, cellPairs) - : region.coprocessorHost.postAppendBeforeWAL(mutation, cellPairs); + ? region.coprocessorHost.postIncrementBeforeWAL(mutation, (List) cellPairs) + : region.coprocessorHost.postAppendBeforeWAL(mutation, (List) cellPairs); } } return cellPairs.stream().map(Pair::getSecond).collect(Collectors.toList()); } - private static Cell reckonDelta(final Cell delta, final Cell currentCell, + private static ExtendedCell reckonDelta(final Cell delta, final Cell currentCell, final byte[] columnFamily, final long now, Mutation mutation, Function supplier) throws IOException { // Forward any tags found on the delta. @@ -4201,7 +4204,11 @@ private static Cell reckonDelta(final Cell delta, final Cell currentCell, } else { tags = TagUtil.carryForwardTTLTag(tags, mutation.getTTL()); PrivateCellUtil.updateLatestStamp(delta, now); - return CollectionUtils.isEmpty(tags) ? delta : PrivateCellUtil.createCell(delta, tags); + assert delta instanceof ExtendedCell; + ExtendedCell deltaCell = (ExtendedCell) delta; + return CollectionUtils.isEmpty(tags) + ? deltaCell + : PrivateCellUtil.createCell(deltaCell, tags); } } @@ -4234,7 +4241,8 @@ private static long getLongValue(final Cell cell) throws DoNotRetryIOException { @Override protected void cacheSkipWALMutationForRegionReplication( MiniBatchOperationInProgress miniBatchOp, - List> nonceKeyAndWALEdits, Map> familyCellMap) { + List> nonceKeyAndWALEdits, + Map> familyCellMap) { if (!this.regionReplicateEnable) { return; } @@ -4251,7 +4259,7 @@ protected void cacheSkipWALMutationForRegionReplication( this.createWALEditForReplicateSkipWAL(miniBatchOp, nonceKeyAndWALEdits); miniBatchOp.setWalEditForReplicateIfExistsSkipWAL(walEditForReplicateIfExistsSkipWAL); } - walEditForReplicateIfExistsSkipWAL.add(familyCellMap); + walEditForReplicateIfExistsSkipWAL.add((Map) familyCellMap); } @@ -4270,7 +4278,7 @@ private WALEdit createWALEditForReplicateSkipWAL( @Override protected void addNonSkipWALMutationsToWALEdit( final MiniBatchOperationInProgress miniBatchOp, WALEdit walEdit, - List cellsFromCP, Map> familyCellMap) { + List cellsFromCP, Map> familyCellMap) { super.addNonSkipWALMutationsToWALEdit(miniBatchOp, walEdit, cellsFromCP, familyCellMap); WALEdit walEditForReplicateIfExistsSkipWAL = @@ -4514,7 +4522,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> cpFamilyMap = cpMutation.getFamilyCellMap(); + Map> cpFamilyMap = (Map) cpMutation.getFamilyCellMap(); region.rewriteCellTags(cpFamilyMap, mutation); // will get added to the memStore later mergeFamilyMaps(familyCellMaps[i], cpFamilyMap); @@ -4523,7 +4531,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress cells : cpFamilyMap.values()) { + for (List cells : cpFamilyMap.values()) { miniBatchOp.addCellCount(cells.size()); } } @@ -4532,10 +4540,10 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> familyMap, - Map> toBeMerged) { - for (Map.Entry> entry : toBeMerged.entrySet()) { - List cells = familyMap.get(entry.getKey()); + private void mergeFamilyMaps(Map> familyMap, + Map> toBeMerged) { + for (Map.Entry> entry : 
toBeMerged.entrySet()) { + List cells = familyMap.get(entry.getKey()); if (cells == null) { familyMap.put(entry.getKey(), entry.getValue()); } else { @@ -4667,7 +4675,7 @@ public void completeMiniBatchOperations( @Override protected void cacheSkipWALMutationForRegionReplication( MiniBatchOperationInProgress miniBatchOp, List> walEdits, - Map> familyCellMap) { + Map> familyCellMap) { // There is no action to do if current region is secondary replica } @@ -5086,14 +5094,14 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { if (mutation instanceof Put) { - updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs); + updateCellTimestamps((Iterable) mutation.getFamilyCellMap().values(), byteTs); } // And else 'delete' is not needed since it already does a second get, and sets the // timestamp from get (see prepareDeleteTimestamps). } else { for (Mutation m : rowMutations.getMutations()) { if (m instanceof Put) { - updateCellTimestamps(m.getFamilyCellMap().values(), byteTs); + updateCellTimestamps((Iterable) m.getFamilyCellMap().values(), byteTs); } } // And else 'delete' is not needed since it already does a second get, and sets the @@ -5204,12 +5212,14 @@ public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare manifest.addRegion(this); } - private void updateSequenceId(final Iterable> cellItr, final long sequenceId) + private void updateSequenceId(final Iterable> cellItr, final long sequenceId) throws IOException { - for (List cells : cellItr) { - if (cells == null) return; - for (Cell cell : cells) { - PrivateCellUtil.setSequenceId(cell, sequenceId); + for (List cells : cellItr) { + if (cells == null) { + return; + } + for (ExtendedCell cell : cells) { + cell.setSequenceId(sequenceId); } } } @@ -5218,10 +5228,12 @@ private void updateSequenceId(final Iterable> cellItr, final long seq * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP} * provided current timestamp. */ - private static void updateCellTimestamps(final Iterable> cellItr, final byte[] now) - throws IOException { - for (List cells : cellItr) { - if (cells == null) continue; + private static void updateCellTimestamps(final Iterable> cellItr, + final byte[] now) throws IOException { + for (List cells : cellItr) { + if (cells == null) { + continue; + } // Optimization: 'foreach' loop is not used. See: // HBASE-12023 HRegion.applyFamilyMapToMemstore creates too many iterator objects assert cells instanceof RandomAccess; @@ -5235,7 +5247,7 @@ private static void updateCellTimestamps(final Iterable> cellItr, fin /** * Possibly rewrite incoming cell tags. 
*/ - private void rewriteCellTags(Map> familyMap, final Mutation m) { + private void rewriteCellTags(Map> familyMap, final Mutation m) { // Check if we have any work to do and early out otherwise // Update these checks as more logic is added here if (m.getTTL() == Long.MAX_VALUE) { @@ -5243,12 +5255,12 @@ private void rewriteCellTags(Map> familyMap, final Mutation m } // From this point we know we have some work to do - for (Map.Entry> e : familyMap.entrySet()) { - List cells = e.getValue(); + for (Map.Entry> e : familyMap.entrySet()) { + List cells = e.getValue(); assert cells instanceof RandomAccess; int listSize = cells.size(); for (int i = 0; i < listSize; i++) { - Cell cell = cells.get(i); + ExtendedCell cell = cells.get(i); List newTags = TagUtil.carryForwardTags(null, cell); newTags = TagUtil.carryForwardTTLTag(newTags, m.getTTL()); // Rewrite the cell with the updated set of tags @@ -5318,7 +5330,7 @@ public void setReadsEnabled(boolean readsEnabled) { * set; when set we will run operations that make sense in the increment/append * scenario but that do not make sense otherwise. */ - private void applyToMemStore(HStore store, List cells, boolean delta, + private void applyToMemStore(HStore store, List cells, boolean delta, MemStoreSizing memstoreAccounting) { // Any change in how we update Store/MemStore needs to also be done in other applyToMemStore!!!! boolean upsert = delta && store.getColumnFamilyDescriptor().getMaxVersions() == 1; @@ -5666,7 +5678,9 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn boolean flush = false; MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); - for (Cell cell : val.getCells()) { + for (Cell c : val.getCells()) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // Check this edit is for me. Also, guard against writing the special // METACOLUMN info such as HBASE::CACHEFLUSH entries if (WALEdit.isMetaEditFamily(cell)) { @@ -6518,10 +6532,11 @@ void replayWALBulkLoadEventMarker(WALProtos.BulkLoadDescriptor bulkLoadEvent) th *
  * We will advance MVCC in the caller directly.
  • * */ - private void replayWALBatchMutate(Map> family2Cells) throws IOException { + private void replayWALBatchMutate(Map> family2Cells) + throws IOException { startRegionOperation(Operation.REPLAY_BATCH_MUTATE); try { - for (Map.Entry> entry : family2Cells.entrySet()) { + for (Map.Entry> entry : family2Cells.entrySet()) { applyToMemStore(getStore(entry.getKey()), entry.getValue(), false, memStoreSizing); } } finally { @@ -6667,13 +6682,15 @@ void replayWALEntry(WALEntry entry, CellScanner cells) throws IOException { } return; } - Map> family2Cells = new TreeMap<>(Bytes.BYTES_COMPARATOR); + Map> family2Cells = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < count; i++) { // Throw index out of bounds if our cell count is off if (!cells.advance()) { throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i); } - Cell cell = cells.current(); + Cell c = cells.current(); + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; if (WALEdit.isMetaEditFamily(cell)) { // If there is meta edit, i.e, we have done flush/compaction/open, then we need to apply // the previous cells first, and then replay the special meta edit. The meta edit is like @@ -6868,7 +6885,7 @@ private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Ob * @param s Store to add edit too. * @param cell Cell to add. */ - protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccounting) { + protected void restoreEdit(HStore s, ExtendedCell cell, MemStoreSizing memstoreAccounting) { s.add(cell, memstoreAccounting); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 6fccccfc8203..e4deae852e5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -693,8 +694,8 @@ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte f.initReader(); try { Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow); - Optional lastKey = f.getLastKey(); - Optional firstKey = f.getFirstKey(); + Optional lastKey = f.getLastKey(); + Optional firstKey = f.getFirstKey(); if (top) { // check if larger than last key. // If lastKey is null means storefile is empty. 
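The raw (Map) and (Iterable) casts in the HRegion hunks above all exist for the same reason: Mutation.getFamilyCellMap() still exposes Map<byte[], List<Cell>>, and Java generics are invariant, so that type is not assignable to Map<byte[], List<ExtendedCell>> even though, as the "fake cast" comment notes, only ExtendedCells are ever stored server side. The sketch below is illustrative only; its Cell/ExtendedCell interfaces and the FakeCastSketch and ServerCell names are minimal stand-ins, not the HBase types.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class FakeCastSketch {
      // Hypothetical stand-ins for the interfaces touched by this patch.
      interface Cell { byte[] getValue(); }
      interface ExtendedCell extends Cell { void setSequenceId(long seqId); }

      static class ServerCell implements ExtendedCell {
        private final byte[] value;
        private long seqId;
        ServerCell(byte[] value) { this.value = value; }
        public byte[] getValue() { return value; }
        public void setSequenceId(long seqId) { this.seqId = seqId; }
      }

      // What a Mutation-like object exposes: the wider Cell type.
      static Map<byte[], List<Cell>> getFamilyCellMap() {
        Map<byte[], List<Cell>> map = new HashMap<>();
        List<Cell> cells = new ArrayList<>();
        cells.add(new ServerCell(new byte[] { 1 })); // server code only ever adds ExtendedCells
        map.put(new byte[] { 'f' }, cells);
        return map;
      }

      @SuppressWarnings({ "unchecked", "rawtypes" })
      public static void main(String[] args) {
        // Does not compile: List<Cell> is not a List<ExtendedCell>, however the lists are used.
        // Map<byte[], List<ExtendedCell>> narrowed = getFamilyCellMap();

        // The pattern used in the patch: an unchecked raw cast.
        Map<byte[], List<ExtendedCell>> narrowed = (Map) getFamilyCellMap();

        for (List<ExtendedCell> cells : narrowed.values()) {
          for (ExtendedCell cell : cells) {
            cell.setSequenceId(42L); // usable without per-cell instanceof checks
          }
        }
        System.out.println("narrowed families: " + narrowed.size());
      }
    }

Nothing checks the cast at runtime; it is safe only by the convention that server-side code never puts a plain Cell into these family maps, which is why the asserts and instanceof checks appear at the few places where cells arrive from outside, such as WAL replay and scanner results.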
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 3c879dbdb730..f2b0fa69190b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.InnerStoreCellComparator; import org.apache.hadoop.hbase.MemoryCompactionPolicy; @@ -554,7 +555,7 @@ public void stopReplayingFromWAL() { /** * Adds a value to the memstore */ - public void add(final Cell cell, MemStoreSizing memstoreSizing) { + public void add(final ExtendedCell cell, MemStoreSizing memstoreSizing) { storeEngine.readLock(); try { if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) { @@ -571,7 +572,7 @@ public void add(final Cell cell, MemStoreSizing memstoreSizing) { /** * Adds the specified value to the memstore */ - public void add(final Iterable cells, MemStoreSizing memstoreSizing) { + public void add(final Iterable cells, MemStoreSizing memstoreSizing) { storeEngine.readLock(); try { if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) { @@ -615,7 +616,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); - Optional lk = reader.getLastKey(); + Optional lk = reader.getLastKey(); Preconditions.checkState(lk.isPresent(), "Last key can not be null"); byte[] lastKey = CellUtil.cloneRow(lk.get()); @@ -868,7 +869,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); - Optional lk = reader.getLastKey(); + Optional lk = reader.getLastKey(); Preconditions.checkState(lk.isPresent(), "Last key can not be null"); byte[] lastKey = CellUtil.cloneRow(lk.get()); if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) { @@ -1913,7 +1914,7 @@ public long getSmallestReadPoint() { * across all of them. 
* @param readpoint readpoint below which we can safely remove duplicate KVs */ - public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { + public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { this.storeEngine.readLock(); try { this.memstore.upsert(cells, readpoint, memstoreSizing); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index b2e222428bac..a8f3e15f3a49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.io.TimeRange; @@ -159,9 +159,9 @@ public class HStoreFile implements StoreFile { private long maxMemstoreTS = -1; // firstKey, lastkey and cellComparator will be set when openReader. - private Optional firstKey; + private Optional firstKey; - private Optional lastKey; + private Optional lastKey; private CellComparator comparator; @@ -170,12 +170,12 @@ public CacheConfig getCacheConf() { } @Override - public Optional getFirstKey() { + public Optional getFirstKey() { return firstKey; } @Override - public Optional getLastKey() { + public Optional getLastKey() { return lastKey; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java index a16c2cca034f..da40c7c49b7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java @@ -20,14 +20,14 @@ import com.google.errorprone.annotations.RestrictedApi; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.yetus.audience.InterfaceAudience; /** * A MemStoreLAB implementation which wraps N MemStoreLABs. Its main duty is in proper managing the * close of the individual MemStoreLAB. This is treated as an immutable one and so do not allow to - * add any more Cells into it. {@link #copyCellInto(Cell)} throws Exception + * add any more Cells into it. {@link #copyCellInto(ExtendedCell)} throws Exception */ @InterfaceAudience.Private public class ImmutableMemStoreLAB implements MemStoreLAB { @@ -45,7 +45,7 @@ public ImmutableMemStoreLAB(List mslabs) { } @Override - public Cell copyCellInto(Cell cell) { + public ExtendedCell copyCellInto(ExtendedCell cell) { throw new IllegalStateException("This is an Immutable MemStoreLAB."); } @@ -58,7 +58,7 @@ public Cell copyCellInto(Cell cell) { * data, or null when this cell cannt be copied. 
*/ @Override - public Cell forceCopyOfBigCellInto(Cell cell) { + public ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell) { MemStoreLAB mslab = this.mslabs.get(0); return mslab.forceCopyOfBigCellInto(cell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 9f42e7ce2ad4..5fbb680edcd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -25,6 +25,7 @@ import java.util.function.IntConsumer; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -94,7 +95,7 @@ public KeyValueHeap(List scanners, CellComparator com } @Override - public Cell peek() { + public ExtendedCell peek() { if (this.current == null) { return null; } @@ -111,12 +112,12 @@ public void recordBlockSize(IntConsumer blockSizeConsumer) { } @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (this.current == null) { return null; } - Cell kvReturn = this.current.next(); - Cell kvNext = this.current.peek(); + ExtendedCell kvReturn = this.current.next(); + ExtendedCell kvNext = this.current.peek(); if (kvNext == null) { this.scannersForDelayedClose.add(this.current); this.current = null; @@ -235,24 +236,25 @@ public void close() { * As individual scanners may run past their ends, those scanners are automatically closed and * removed from the heap. *
    - * This function (and {@link #reseek(Cell)}) does not do multi-column Bloom filter and lazy-seek - * optimizations. To enable those, call {@link #requestSeek(Cell, boolean, boolean)}. + * This function (and {@link #reseek(ExtendedCell)}) does not do multi-column Bloom filter and + * lazy-seek optimizations. To enable those, call + * {@link #requestSeek(ExtendedCell, boolean, boolean)}. * @param seekKey KeyValue to seek at or after * @return true if KeyValues exist at or after specified key, false if not */ @Override - public boolean seek(Cell seekKey) throws IOException { + public boolean seek(ExtendedCell seekKey) throws IOException { return generalizedSeek(false, // This is not a lazy seek seekKey, false, // forward (false: this is not a reseek) false); // Not using Bloom filters } /** - * This function is identical to the {@link #seek(Cell)} function except that + * This function is identical to the {@link #seek(ExtendedCell)} function except that * scanner.seek(seekKey) is changed to scanner.reseek(seekKey). */ @Override - public boolean reseek(Cell seekKey) throws IOException { + public boolean reseek(ExtendedCell seekKey) throws IOException { return generalizedSeek(false, // This is not a lazy seek seekKey, true, // forward (true because this is reseek) false); // Not using Bloom filters @@ -262,7 +264,8 @@ public boolean reseek(Cell seekKey) throws IOException { * {@inheritDoc} */ @Override - public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell key, boolean forward, boolean useBloom) + throws IOException { return generalizedSeek(true, key, forward, useBloom); } @@ -274,8 +277,8 @@ public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws I * @param forward whether to seek forward (also known as reseek) * @param useBloom whether to optimize seeks using Bloom filters */ - private boolean generalizedSeek(boolean isLazy, Cell seekKey, boolean forward, boolean useBloom) - throws IOException { + private boolean generalizedSeek(boolean isLazy, ExtendedCell seekKey, boolean forward, + boolean useBloom) throws IOException { if (!isLazy && useBloom) { throw new IllegalArgumentException( "Multi-column Bloom filter " + "optimization requires a lazy seek"); @@ -406,7 +409,7 @@ KeyValueScanner getCurrentForTesting() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { // here we return the next index key from the top scanner return current == null ? null : current.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index c3b60792fb65..bfe47772f1aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.function.IntConsumer; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -38,27 +38,27 @@ public interface KeyValueScanner extends Shipper, Closeable { * The byte array represents for NO_NEXT_INDEXED_KEY; The actual value is irrelevant because this * is always compared by reference. 
*/ - public static final Cell NO_NEXT_INDEXED_KEY = new KeyValue(); + public static final ExtendedCell NO_NEXT_INDEXED_KEY = new KeyValue(); /** * Look at the next Cell in this scanner, but do not iterate scanner. NOTICE: The returned cell * has not been passed into ScanQueryMatcher. So it may not be what the user need. * @return the next Cell */ - Cell peek(); + ExtendedCell peek(); /** * Return the next Cell in this scanner, iterating the scanner * @return the next Cell */ - Cell next() throws IOException; + ExtendedCell next() throws IOException; /** * Seek the scanner at or after the specified KeyValue. * @param key seek value * @return true if scanner has values left, false if end of scanner */ - boolean seek(Cell key) throws IOException; + boolean seek(ExtendedCell key) throws IOException; /** * Reseek the scanner at or after the specified KeyValue. This method is guaranteed to seek at or @@ -67,7 +67,7 @@ public interface KeyValueScanner extends Shipper, Closeable { * @param key seek value (should be non-null) * @return true if scanner has values left, false if end of scanner */ - boolean reseek(Cell key) throws IOException; + boolean reseek(ExtendedCell key) throws IOException; /** * Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners. This is @@ -105,7 +105,7 @@ default long getScannerOrder() { * @param forward do a forward-only "reseek" instead of a random-access seek * @param useBloom whether to enable multi-column Bloom filter optimization */ - boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException; + boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) throws IOException; /** * We optimize our store scanners by checking the most recent store file first, so we sometimes @@ -148,7 +148,7 @@ default long getScannerOrder() { * @param key seek KeyValue * @return true if the scanner is at the valid KeyValue, false if such KeyValue does not exist */ - public boolean backwardSeek(Cell key) throws IOException; + public boolean backwardSeek(ExtendedCell key) throws IOException; /** * Seek the scanner at the first Cell of the row which is the previous row of specified key @@ -156,7 +156,7 @@ default long getScannerOrder() { * @return true if the scanner at the first valid Cell of previous row, false if not existing such * Cell */ - public boolean seekToPreviousRow(Cell key) throws IOException; + public boolean seekToPreviousRow(ExtendedCell key) throws IOException; /** * Seek the scanner at the first KeyValue of last row @@ -169,5 +169,5 @@ default long getScannerOrder() { * between last key of current block and first key of next block.. see * HFileWriterImpl#getMidpoint, or null if not known. 
*/ - public Cell getNextIndexedKey(); + public ExtendedCell getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index cd8eecd54301..ed8e6a2cd8ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.yetus.audience.InterfaceAudience; @@ -65,14 +66,14 @@ public interface MemStore extends Closeable { * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void add(final Cell cell, MemStoreSizing memstoreSizing); + void add(final ExtendedCell cell, MemStoreSizing memstoreSizing); /** * Write the updates * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void add(Iterable cells, MemStoreSizing memstoreSizing); + void add(Iterable cells, MemStoreSizing memstoreSizing); /** Returns Oldest timestamp of all the Cells in the MemStore */ long timeOfOldestEdit(); @@ -92,7 +93,7 @@ public interface MemStore extends Closeable { * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing); + void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing); /** * @return scanner over the memstore. 
This might include scanner over the snapshot when one is diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java index 21e722b8c029..281dac85a270 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java @@ -22,8 +22,8 @@ import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +41,9 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator private static final Logger LOG = LoggerFactory.getLogger(MemStoreCompactorSegmentsIterator.class); - private final List kvs = new ArrayList<>(); + private final List kvs = new ArrayList<>(); private boolean hasMore = true; - private Iterator kvsIterator; + private Iterator kvsIterator; // scanner on top of pipeline scanner that uses ScanQueryMatcher private InternalScanner compactingScanner; @@ -71,7 +71,7 @@ public boolean hasNext() { } @Override - public Cell next() { + public ExtendedCell next() { if (!hasNext()) { throw new NoSuchElementException(); } @@ -132,7 +132,7 @@ private InternalScanner createScanner(HStore store, List scanne } } - /* + /** * Refill kev-value set (should be invoked only when KVS is empty) Returns true if KVS is * non-empty */ @@ -145,7 +145,10 @@ private boolean refillKVS() { kvs.clear(); for (;;) { try { - hasMore = compactingScanner.next(kvs, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = compactingScanner.next((List) kvs, scannerContext); } catch (IOException e) { // should not happen as all data are in memory throw new IllegalStateException(e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index 4edefaf7ca0d..22fd49a9918f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -38,8 +38,8 @@ * collection occurs. *
    * This manages the large sized chunks. When Cells are to be added to Memstore, MemStoreLAB's - * {@link #copyCellInto(Cell)} gets called. This allocates enough size in the chunk to hold this - * cell's data and copies into this area and then recreate a Cell over this copied data. + * {@link #copyCellInto(ExtendedCell)} gets called. This allocates enough size in the chunk to hold + * this cell's data and copies into this area and then recreate a Cell over this copied data. *
    * @see ChunkCreator */ @@ -68,7 +68,7 @@ public interface MemStoreLAB { * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance * over the copied the data. When this MemStoreLAB can not copy this Cell, it returns null. */ - Cell copyCellInto(Cell cell); + ExtendedCell copyCellInto(ExtendedCell cell); /** * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance @@ -78,7 +78,7 @@ public interface MemStoreLAB { * called while the process of flattening to CellChunkMap is running, for forcing the allocation * of big cells on this MSLAB. */ - Cell forceCopyOfBigCellInto(Cell cell); + ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell); /** * Close instance since it won't be used any more, try to put the chunks back to pool diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java index 41dd270cd22e..52829255df4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType; import org.apache.yetus.audience.InterfaceAudience; @@ -112,7 +111,7 @@ public MemStoreLABImpl(Configuration conf) { } @Override - public Cell copyCellInto(Cell cell) { + public ExtendedCell copyCellInto(ExtendedCell cell) { // See head of copyBBECellInto for how it differs from copyCellInto return (cell instanceof ByteBufferExtendedCell) ? copyBBECellInto((ByteBufferExtendedCell) cell, maxAlloc) @@ -125,7 +124,7 @@ public Cell copyCellInto(Cell cell) { * MSLAB, during this process, the big cells are copied into MSLAB using this method. */ @Override - public Cell forceCopyOfBigCellInto(Cell cell) { + public ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell) { int size = Segment.getCellLength(cell); Preconditions.checkArgument(size >= 0, "negative size"); if (size + ChunkCreator.SIZEOF_CHUNK_HEADER <= dataChunkSize) { @@ -145,7 +144,7 @@ public Cell forceCopyOfBigCellInto(Cell cell) { * it was too big. Uses less CPU. See HBASE-20875 for evidence. * @see #copyCellInto(Cell, int) */ - private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { + private ExtendedCell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { int size = cell.getSerializedSize(); Preconditions.checkArgument(size >= 0, "negative size"); // Callers should satisfy large allocations from JVM heap so limit fragmentation. 
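The MemStoreLAB contract these hunks narrow to ExtendedCell comes down to three things: serialize the cell into a preallocated chunk, recreate a cell over the copied bytes, and return null when the allocation is too large for the LAB so the caller keeps the original heap cell. The sketch below illustrates only that contract under those assumptions; ToyMemStoreLAB, HeapCell and ChunkBackedCell are hypothetical stand-ins, and none of the ChunkCreator or ByteBufferExtendedCell handling of the real MemStoreLABImpl is shown.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class MslabSketch {
      // Hypothetical stand-in: a cell that knows its serialized size and can write itself out.
      interface ExtendedCell {
        int getSerializedSize();
        void write(ByteBuffer buf, int offset);
      }

      static class HeapCell implements ExtendedCell {
        private final byte[] payload;
        HeapCell(byte[] payload) { this.payload = payload; }
        public int getSerializedSize() { return payload.length; }
        public void write(ByteBuffer buf, int offset) {
          for (int i = 0; i < payload.length; i++) {
            buf.put(offset + i, payload[i]);
          }
        }
      }

      // A cell recreated over bytes that were copied into a shared chunk.
      static class ChunkBackedCell implements ExtendedCell {
        private final ByteBuffer chunk;
        private final int offset;
        private final int length;
        ChunkBackedCell(ByteBuffer chunk, int offset, int length) {
          this.chunk = chunk;
          this.offset = offset;
          this.length = length;
        }
        public int getSerializedSize() { return length; }
        public void write(ByteBuffer buf, int dstOffset) {
          for (int i = 0; i < length; i++) {
            buf.put(dstOffset + i, chunk.get(offset + i));
          }
        }
      }

      // Toy LAB: one chunk, bump-the-pointer allocation, null for oversized cells.
      static class ToyMemStoreLAB {
        private final ByteBuffer chunk;
        private final int maxAlloc;
        private int nextFree = 0;

        ToyMemStoreLAB(int chunkSize, int maxAlloc) {
          this.chunk = ByteBuffer.allocate(chunkSize);
          this.maxAlloc = maxAlloc;
        }

        ExtendedCell copyCellInto(ExtendedCell cell) {
          int size = cell.getSerializedSize();
          // Oversized allocations stay on the JVM heap to limit chunk fragmentation.
          if (size > maxAlloc || nextFree + size > chunk.capacity()) {
            return null;
          }
          int offset = nextFree;
          cell.write(chunk, offset);                       // copy the cell's bytes into the chunk
          nextFree += size;
          return new ChunkBackedCell(chunk, offset, size); // recreate a cell over the copy
        }
      }

      public static void main(String[] args) {
        ToyMemStoreLAB lab = new ToyMemStoreLAB(64, 16);
        ExtendedCell small = new HeapCell("row/cf:q=v".getBytes(StandardCharsets.UTF_8));
        ExtendedCell big = new HeapCell(new byte[32]);
        System.out.println("small cell copied into chunk: " + (lab.copyCellInto(small) != null));
        System.out.println("big cell kept on heap (null): " + (lab.copyCellInto(big) == null));
      }
    }

In the real MemStoreLABImpl the chunk-backed cell also carries the original sequenceId forward (see createChunkCell in the hunk that follows); narrowing the parameter to ExtendedCell is what lets the patch drop the old KeyValueUtil.appendTo fallback for non-extended cells.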
@@ -179,7 +178,7 @@ private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { /** * @see #copyBBECellInto(ByteBufferExtendedCell, int) */ - private Cell copyCellInto(Cell cell, int maxAlloc) { + private ExtendedCell copyCellInto(ExtendedCell cell, int maxAlloc) { int size = Segment.getCellLength(cell); Preconditions.checkArgument(size >= 0, "negative size"); // Callers should satisfy large allocations directly from JVM since they @@ -216,16 +215,10 @@ private Cell copyCellInto(Cell cell, int maxAlloc) { * out of it * @see #copyBBECToChunkCell(ByteBufferExtendedCell, ByteBuffer, int, int) */ - private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int len) { + private static ExtendedCell copyToChunkCell(ExtendedCell cell, ByteBuffer buf, int offset, + int len) { int tagsLen = cell.getTagsLength(); - if (cell instanceof ExtendedCell) { - ((ExtendedCell) cell).write(buf, offset); - } else { - // Normally all Cell impls within Server will be of type ExtendedCell. Just considering the - // other case also. The data fragments within Cell is copied into buf as in KeyValue - // serialization format only. - KeyValueUtil.appendTo(cell, buf, offset, true); - } + cell.write(buf, offset); return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId()); } @@ -234,14 +227,14 @@ private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int l * out of it * @see #copyToChunkCell(Cell, ByteBuffer, int, int) */ - private static Cell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, int offset, - int len) { + private static ExtendedCell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, + int offset, int len) { int tagsLen = cell.getTagsLength(); cell.write(buf, offset); return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId()); } - private static Cell createChunkCell(ByteBuffer buf, int offset, int len, int tagsLen, + private static ExtendedCell createChunkCell(ByteBuffer buf, int offset, int len, int tagsLen, long sequenceId) { // TODO : write the seqid here. 
For writing seqId we should create a new cell type so // that seqId is not used as the state diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java index 0a91ada9e07e..1cb8c717c4c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -61,7 +61,7 @@ public boolean hasNext() { } @Override - public Cell next() { + public ExtendedCell next() { try { // try to get next if (!closed && heap != null) { return heap.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java index 4657b060a8d0..14738d7a10a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.Iterator; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -28,7 +28,7 @@ * not thread-safe and must have only one instance per MemStore in each period of time */ @InterfaceAudience.Private -public abstract class MemStoreSegmentsIterator implements Iterator { +public abstract class MemStoreSegmentsIterator implements Iterator { protected final ScannerContext scannerContext; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java index 945bcef05756..9de37c3f40cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; import org.apache.hadoop.hbase.mob.MobUtils; @@ -73,7 +74,8 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException long mobKVCount = 0; long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { - Cell cell = outResult.get(i); + // At server side, we should only get ExtendedCell + ExtendedCell cell = (ExtendedCell) outResult.get(i); if (MobUtils.isMobReferenceCell(cell)) { MobCell mobCell = mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index 53cb1bcdbc91..6ebd1cf9c9cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -20,9 +20,9 @@ import java.util.Iterator; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -41,8 +41,8 @@ public class MutableSegment extends Segment { ClassSize.align(Segment.DEEP_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.SYNC_TIMERANGE_TRACKER + ClassSize.REFERENCE + ClassSize.ATOMIC_BOOLEAN); - protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, - MemStoreSizing memstoreSizing) { + protected MutableSegment(CellSet cellSet, CellComparator comparator, + MemStoreLAB memStoreLAB, MemStoreSizing memstoreSizing) { super(cellSet, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.SYNC)); incMemStoreSize(0, DEEP_OVERHEAD, 0, 0); // update the mutable segment metadata if (memstoreSizing != null) { @@ -55,24 +55,25 @@ protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB * @param cell the cell to add * @param mslabUsed whether using MSLAB */ - public void add(Cell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, + public void add(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) { internalAdd(cell, mslabUsed, memStoreSizing, sizeAddedPreOperation); } - public void upsert(Cell cell, long readpoint, MemStoreSizing memStoreSizing, + public void upsert(ExtendedCell cell, long readpoint, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) { internalAdd(cell, false, memStoreSizing, sizeAddedPreOperation); // Get the Cells for the row/family/qualifier regardless of timestamp. 
// For this case we want to clean up any other puts - Cell firstCell = PrivateCellUtil.createFirstOnRowColTS(cell, HConstants.LATEST_TIMESTAMP); - SortedSet ss = this.tailSet(firstCell); - Iterator it = ss.iterator(); + ExtendedCell firstCell = + PrivateCellUtil.createFirstOnRowColTS(cell, HConstants.LATEST_TIMESTAMP); + SortedSet ss = this.tailSet(firstCell); + Iterator it = ss.iterator(); // versions visible to oldest scanner int versionsVisible = 0; while (it.hasNext()) { - Cell cur = it.next(); + ExtendedCell cur = it.next(); if (cell == cur) { // ignore the one just put in @@ -118,7 +119,7 @@ public boolean setInMemoryFlushed() { * Returns the first cell in the segment * @return the first cell in the segment */ - Cell first() { + ExtendedCell first() { return this.getCellSet().first(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java index 8f1898a3c658..f55bbcc639de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java @@ -21,7 +21,7 @@ import java.util.function.IntConsumer; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -33,7 +33,8 @@ public abstract class NonLazyKeyValueScanner implements KeyValueScanner { @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) + throws IOException { return doRealSeek(this, kv, forward); } @@ -47,7 +48,7 @@ public void enforceSeek() throws IOException { throw new NotImplementedException("enforceSeek must not be called on a " + "non-lazy scanner"); } - public static boolean doRealSeek(KeyValueScanner scanner, Cell kv, boolean forward) + public static boolean doRealSeek(KeyValueScanner scanner, ExtendedCell kv, boolean forward) throws IOException { return forward ? 
scanner.reseek(kv) : scanner.seek(kv); } @@ -76,7 +77,7 @@ public Path getFilePath() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java index 2c2cf26e9fa7..e5eebd85b547 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java @@ -19,7 +19,7 @@ import java.io.IOException; import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -30,13 +30,13 @@ public abstract class NonReversedNonLazyKeyValueScanner extends NonLazyKeyValueScanner { @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { throw new NotImplementedException( "backwardSeek must not be called on a " + "non-reversed scanner"); } @Override - public boolean seekToPreviousRow(Cell key) throws IOException { + public boolean seekToPreviousRow(ExtendedCell key) throws IOException { throw new NotImplementedException( "seekToPreviousRow must not be called on a " + "non-reversed scanner"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index d829b1961070..a5fc2947bee0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -441,7 +442,7 @@ private boolean nextInternal(List results, ScannerContext scannerContext) region.checkInterrupt(); // Let's see what we have in the storeHeap. 
- Cell current = this.storeHeap.peek(); + ExtendedCell current = this.storeHeap.peek(); boolean shouldStop = shouldStop(current); // When has filter row is true it means that the all the cells for a particular row must be @@ -651,7 +652,7 @@ private void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { } /** Returns true when the joined heap may have data for the current row */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { + private boolean joinedHeapMayHaveData(ExtendedCell currentRowCell) throws IOException { Cell nextJoinedKv = joinedHeap.peek(); boolean matchCurrentRow = nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); @@ -660,7 +661,7 @@ private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { // If the next value in the joined heap does not match the current row, try to seek to the // correct row if (!matchCurrentRow) { - Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); + ExtendedCell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); @@ -776,7 +777,7 @@ public synchronized boolean reseek(byte[] row) throws IOException { } boolean result = false; region.startRegionOperation(); - Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); + ExtendedCell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); try { // use request seek to make use of the lazy seek option. See HBASE-5520 result = this.storeHeap.requestSeek(kv, true, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java index 60c634578aa7..522aac80aca8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; @@ -43,22 +44,23 @@ public ReversedKeyValueHeap(List scanners, CellCompar } @Override - public boolean seek(Cell seekKey) throws IOException { + public boolean seek(ExtendedCell seekKey) throws IOException { throw new IllegalStateException("seek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean reseek(Cell seekKey) throws IOException { + public boolean reseek(ExtendedCell seekKey) throws IOException { throw new IllegalStateException("reseek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell key, boolean forward, boolean useBloom) + throws IOException { throw new IllegalStateException("requestSeek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean seekToPreviousRow(Cell seekKey) throws IOException { + public boolean seekToPreviousRow(ExtendedCell seekKey) throws IOException { if (current == null) { return false; } @@ -87,7 +89,7 @@ public boolean seekToPreviousRow(Cell seekKey) throws IOException { } 
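NonLazyKeyValueScanner.doRealSeek above reduces requestSeek to "forward ? scanner.reseek(kv) : scanner.seek(kv)", and RegionScannerImpl.reseek builds a first-on-row ExtendedCell and issues a lazy requestSeek through the store heap. The distinction the narrowed KeyValueScanner signatures carry over unchanged is between seek, which may reposition anywhere at or after the key, and reseek, which is the forward-only variant. A toy illustration over sorted longs follows; ToyScanner and the long keys are hypothetical stand-ins for a scanner over ExtendedCells, and only the peek/next/seek/reseek names come from the interface above.

    import java.util.Arrays;

    public class SeekReseekSketch {
      // Toy forward scanner over sorted keys, mirroring the peek/next/seek/reseek contract.
      static class ToyScanner {
        private final long[] keys; // sorted ascending
        private int pos = 0;

        ToyScanner(long[] keys) { this.keys = keys; }

        Long peek() { return pos < keys.length ? keys[pos] : null; }

        Long next() { return pos < keys.length ? keys[pos++] : null; }

        // Position at the first key >= seekKey; may move in either direction.
        boolean seek(long seekKey) {
          int idx = Arrays.binarySearch(keys, seekKey);
          pos = idx >= 0 ? idx : -idx - 1;
          return pos < keys.length;
        }

        // Like seek, but only ever moves forward from the current position.
        boolean reseek(long seekKey) {
          while (pos < keys.length && keys[pos] < seekKey) {
            pos++;
          }
          return pos < keys.length;
        }
      }

      public static void main(String[] args) {
        ToyScanner scanner = new ToyScanner(new long[] { 10, 20, 30, 40 });
        scanner.seek(25);
        System.out.println("seek(25) -> peek " + scanner.peek());   // 30
        scanner.reseek(35);
        System.out.println("reseek(35) -> next " + scanner.next()); // 40
        System.out.println("exhausted: " + (scanner.peek() == null));
      }
    }

The forward-only guarantee is what lets a heap of scanners avoid re-examining keys it has already passed, which is why KeyValueHeap.generalizedSeek takes the forward flag alongside the lazy and Bloom-filter options.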
@Override - public boolean backwardSeek(Cell seekKey) throws IOException { + public boolean backwardSeek(ExtendedCell seekKey) throws IOException { if (current == null) { return false; } @@ -116,12 +118,12 @@ public boolean backwardSeek(Cell seekKey) throws IOException { } @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (this.current == null) { return null; } - Cell kvReturn = this.current.next(); - Cell kvNext = this.current.peek(); + ExtendedCell kvReturn = this.current.next(); + ExtendedCell kvNext = this.current.peek(); if (kvNext == null || this.comparator.kvComparator.compareRows(kvNext, kvReturn) > 0) { if (this.current.seekToPreviousRow(kvReturn)) { this.heap.add(this.current); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java index 7863493e3282..505cd5dedcee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; import org.apache.hadoop.hbase.mob.MobUtils; @@ -73,9 +74,10 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { Cell cell = outResult.get(i); + assert cell instanceof ExtendedCell; if (MobUtils.isMobReferenceCell(cell)) { - MobCell mobCell = - mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); + MobCell mobCell = mobStore.resolve((ExtendedCell) cell, cacheMobBlocks, readPt, + readEmptyValueOnMobCellMiss); mobKVCount++; mobKVSize += mobCell.getCell().getValueLength(); outResult.set(i, mobCell.getCell()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java index d0ea2e08d173..e9eeba2b6f4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -58,7 +59,7 @@ protected KeyValueHeap newKVHeap(List scanners, } @Override - protected void seekScanners(List scanners, Cell seekKey, + protected void seekScanners(List scanners, ExtendedCell seekKey, boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the previous matching Row). 
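The reversed scanners invert that contract: in the hunks above and below, ReversedKeyValueHeap and ReversedStoreScanner reject plain seek/reseek/requestSeek with IllegalStateException, seekToNextRow is redirected to seekToPreviousRow, and seekAsDirection becomes backwardSeek, with scanners seeked to the start of the row or, if no exact match exists, the previous matching row. The toy below shows a rough analogue of that "at or before" positioning over sorted longs; ToyReversedScanner is a hypothetical stand-in, not the HBase heap.

    import java.util.Arrays;

    public class BackwardSeekSketch {
      // Toy reversed scanner: walks sorted keys from high to low.
      static class ToyReversedScanner {
        private final long[] keys; // sorted ascending, iterated descending
        private int pos;

        ToyReversedScanner(long[] keys) {
          this.keys = keys;
          this.pos = keys.length - 1;
        }

        Long peek() { return pos >= 0 ? keys[pos] : null; }

        Long next() { return pos >= 0 ? keys[pos--] : null; }

        // Position at the largest key <= seekKey, the reversed analogue of "at or after".
        boolean backwardSeek(long seekKey) {
          int idx = Arrays.binarySearch(keys, seekKey);
          pos = idx >= 0 ? idx : -idx - 2; // insertion point minus one
          return pos >= 0;
        }
      }

      public static void main(String[] args) {
        ToyReversedScanner scanner = new ToyReversedScanner(new long[] { 10, 20, 30, 40 });
        scanner.backwardSeek(25);
        System.out.println("backwardSeek(25) -> " + scanner.peek()); // 20
        System.out.println("next: " + scanner.next());               // 20
        System.out.println("next: " + scanner.next());               // 10
        System.out.println("exhausted: " + (scanner.peek() == null));
      }
    }

ReversedMobStoreScanner layers the same ExtendedCell cast seen in MobStoreScanner on top of this, since mob reference resolution now takes an ExtendedCell.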
@@ -74,7 +75,7 @@ protected void seekScanners(List scanners, Cell seekK } @Override - protected boolean seekToNextRow(Cell kv) throws IOException { + protected boolean seekToNextRow(ExtendedCell kv) throws IOException { return seekToPreviousRow(kv); } @@ -82,7 +83,7 @@ protected boolean seekToNextRow(Cell kv) throws IOException { * Do a backwardSeek in a reversed StoreScanner(scan backward) */ @Override - protected boolean seekAsDirection(Cell kv) throws IOException { + protected boolean seekAsDirection(ExtendedCell kv) throws IOException { return backwardSeek(kv); } @@ -98,17 +99,17 @@ protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) } @Override - public boolean reseek(Cell kv) throws IOException { + public boolean reseek(ExtendedCell kv) throws IOException { throw new IllegalStateException("reseek cannot be called on ReversedStoreScanner"); } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { throw new IllegalStateException("seek cannot be called on ReversedStoreScanner"); } @Override - public boolean seekToPreviousRow(Cell key) throws IOException { + public boolean seekToPreviousRow(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -116,7 +117,7 @@ public boolean seekToPreviousRow(Cell key) throws IOException { } @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 272dd4069629..b5e6192a0c32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; @@ -50,7 +51,7 @@ public abstract class Segment implements MemStoreSizing { public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE + ClassSize.CELL_SET + 2 * ClassSize.ATOMIC_LONG + ClassSize.REENTRANT_LOCK; - private AtomicReference cellSet = new AtomicReference<>(); + private AtomicReference> cellSet = new AtomicReference<>(); private final CellComparator comparator; private ReentrantReadWriteLock updatesLock; protected long minSequenceId; @@ -93,8 +94,8 @@ protected Segment(CellComparator comparator, List segments, } // This constructor is used to create empty Segments. - protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, - TimeRangeTracker trt) { + protected Segment(CellSet cellSet, CellComparator comparator, + MemStoreLAB memStoreLAB, TimeRangeTracker trt) { this.cellSet.set(cellSet); this.comparator = comparator; this.updatesLock = new ReentrantReadWriteLock(); @@ -154,12 +155,12 @@ public void close() { * set to 'true' and the cell is copied into MSLAB. 
* @return either the given cell or its clone */ - public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) { + public ExtendedCell maybeCloneWithAllocator(ExtendedCell cell, boolean forceCloneOfBigCell) { if (this.memStoreLAB == null) { return cell; } - Cell cellFromMslab; + ExtendedCell cellFromMslab; if (forceCloneOfBigCell) { cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell); } else { @@ -202,7 +203,7 @@ public void decScannerCount() { * @return this object */ - protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { + protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { this.cellSet.compareAndSet(cellSetOld, cellSetNew); return this; } @@ -265,15 +266,15 @@ public TimeRangeTracker getTimeRangeTracker() { } // *** Methods for SegmentsScanner - public Cell last() { + public ExtendedCell last() { return getCellSet().last(); } - public Iterator iterator() { + public Iterator iterator() { return getCellSet().iterator(); } - public SortedSet headSet(Cell firstKeyOnRow) { + public SortedSet headSet(ExtendedCell firstKeyOnRow) { return getCellSet().headSet(firstKeyOnRow); } @@ -286,7 +287,7 @@ public int compareRows(Cell left, Cell right) { } /** Returns a set of all cells in the segment */ - protected CellSet getCellSet() { + protected CellSet getCellSet() { return cellSet.get(); } @@ -298,13 +299,13 @@ protected CellComparator getComparator() { return comparator; } - protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, + protected void internalAdd(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { boolean succ = getCellSet().add(cell); updateMetaInfo(cell, succ, mslabUsed, memstoreSizing, sizeAddedPreOperation); } - protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { long delta = 0; long cellSize = getCellLength(cellToAdd); @@ -335,7 +336,8 @@ protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, } } - protected void updateMetaInfo(Cell cellToAdd, boolean succ, MemStoreSizing memstoreSizing) { + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, + MemStoreSizing memstoreSizing) { updateMetaInfo(cellToAdd, succ, (getMemStoreLAB() != null), memstoreSizing, false); } @@ -396,7 +398,7 @@ protected long indexEntryOffHeapSize(boolean offHeap) { * @param firstCell a cell in the segment * @return a subset of the segment cell set, which starts with the given cell */ - protected SortedSet tailSet(Cell firstCell) { + protected SortedSet tailSet(ExtendedCell firstCell) { return getCellSet().tailSet(firstCell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index a05ac364fc01..f263bf01fe24 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -139,7 +140,7 @@ private ImmutableSegment createImmutableSegment(final Configuration conf, private 
MutableSegment generateMutableSegment(final Configuration conf, CellComparator comparator, MemStoreLAB memStoreLAB, MemStoreSizing memstoreSizing) { // TBD use configuration to set type of segment - CellSet set = new CellSet(comparator); + CellSet set = new CellSet<>(comparator); return new MutableSegment(set, comparator, memStoreLAB, memstoreSizing); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index 1d28c55570ed..4d380d936f58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -24,6 +24,7 @@ import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -40,17 +41,17 @@ public class SegmentScanner implements KeyValueScanner { private long readPoint; // the current iterator that can be reinitialized by // seek(), backwardSeek(), or reseek() - protected Iterator iter; + protected Iterator iter; // the pre-calculated cell to be returned by peek() - protected Cell current = null; + protected ExtendedCell current = null; // or next() // A flag represents whether could stop skipping KeyValues for MVCC // if have encountered the next row. Only used for reversed scan private boolean stopSkippingKVsIfNextRow = false; // Stop skipping KeyValues for MVCC if finish this row. Only used for reversed scan - private Cell stopSkippingKVsRow; + private ExtendedCell stopSkippingKVsRow; // last iterated KVs by seek (to restore the iterator state after reseek) - private Cell last = null; + private ExtendedCell last = null; // flag to indicate if this scanner is closed protected boolean closed = false; @@ -77,7 +78,7 @@ protected SegmentScanner(Segment segment, long readPoint) { * @return the currently observed Cell */ @Override - public Cell peek() { // sanity check, the current should be always valid + public ExtendedCell peek() { // sanity check, the current should be always valid if (closed) { return null; } @@ -93,11 +94,11 @@ public Cell peek() { // sanity check, the current should be always valid * @return the next Cell or null if end of scanner */ @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (closed) { return null; } - Cell oldCurrent = current; + ExtendedCell oldCurrent = current; updateCurrent(); // update the currently observed Cell return oldCurrent; } @@ -108,7 +109,7 @@ public Cell next() throws IOException { * @return true if scanner has values left, false if end of scanner */ @Override - public boolean seek(Cell cell) throws IOException { + public boolean seek(ExtendedCell cell) throws IOException { if (closed) { return false; } @@ -124,7 +125,7 @@ public boolean seek(Cell cell) throws IOException { return (current != null); } - protected Iterator getIterator(Cell cell) { + protected Iterator getIterator(ExtendedCell cell) { return segment.tailSet(cell).iterator(); } @@ -136,7 +137,7 @@ protected Iterator getIterator(Cell cell) { * @return true if scanner has values left, false if end of scanner */ @Override - public boolean reseek(Cell cell) throws IOException { + public boolean reseek(ExtendedCell cell) 
throws IOException { if (closed) { return false; } @@ -161,7 +162,7 @@ public boolean reseek(Cell cell) throws IOException { * @return true if the scanner is at the valid KeyValue, false if such Cell does not exist */ @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { if (closed) { return false; } @@ -179,21 +180,21 @@ public boolean backwardSeek(Cell key) throws IOException { * Cell */ @Override - public boolean seekToPreviousRow(Cell cell) throws IOException { + public boolean seekToPreviousRow(ExtendedCell cell) throws IOException { if (closed) { return false; } boolean keepSeeking; Cell key = cell; do { - Cell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key); - SortedSet cellHead = segment.headSet(firstKeyOnRow); + ExtendedCell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key); + SortedSet cellHead = segment.headSet(firstKeyOnRow); Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last(); if (lastCellBeforeRow == null) { current = null; return false; } - Cell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow); + ExtendedCell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow); this.stopSkippingKVsIfNextRow = true; this.stopSkippingKVsRow = firstKeyOnPreviousRow; seek(firstKeyOnPreviousRow); @@ -220,12 +221,12 @@ public boolean seekToLastRow() throws IOException { if (closed) { return false; } - Cell higherCell = segment.isEmpty() ? null : segment.last(); + ExtendedCell higherCell = segment.isEmpty() ? null : segment.last(); if (higherCell == null) { return false; } - Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell); + ExtendedCell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell); if (seek(firstCellOnLastRow)) { return true; @@ -258,7 +259,7 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) } @Override - public boolean requestSeek(Cell c, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell c, boolean forward, boolean useBloom) throws IOException { return NonLazyKeyValueScanner.doRealSeek(this, c, forward); } @@ -302,7 +303,7 @@ public Path getFilePath() { * otherwise Not relevant for in-memory scanner */ @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } @@ -334,7 +335,7 @@ private Segment getSegment() { * Private internal method for iterating over the segment, skipping the cells with irrelevant MVCC */ protected void updateCurrent() { - Cell next = null; + ExtendedCell next = null; try { while (iter.hasNext()) { @@ -363,7 +364,7 @@ protected void updateCurrent() { * Private internal method that returns the higher of the two key values, or null if they are both * null */ - private Cell getHighest(Cell first, Cell second) { + private ExtendedCell getHighest(ExtendedCell first, ExtendedCell second) { if (first == null && second == null) { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java index 3109920dffae..fdfcf4cf7d28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import 
java.util.Iterator; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -30,8 +30,8 @@ @InterfaceAudience.Private public class SnapshotSegmentScanner extends NonReversedNonLazyKeyValueScanner { private final ImmutableSegment segment; - private Iterator iter; - private Cell current; + private Iterator iter; + private ExtendedCell current; public SnapshotSegmentScanner(ImmutableSegment segment) { this.segment = segment; @@ -42,18 +42,18 @@ public SnapshotSegmentScanner(ImmutableSegment segment) { } } - private static Iterator createIterator(Segment segment) { + private static Iterator createIterator(Segment segment) { return segment.getCellSet().iterator(); } @Override - public Cell peek() { + public ExtendedCell peek() { return current; } @Override - public Cell next() { - Cell oldCurrent = current; + public ExtendedCell next() { + ExtendedCell oldCurrent = current; if (iter.hasNext()) { current = iter.next(); } else { @@ -63,16 +63,16 @@ public Cell next() { } @Override - public boolean seek(Cell seekCell) { + public boolean seek(ExtendedCell seekCell) { // restart iterator this.iter = createIterator(this.segment); return reseek(seekCell); } @Override - public boolean reseek(Cell seekCell) { + public boolean reseek(ExtendedCell seekCell) { while (this.iter.hasNext()) { - Cell next = this.iter.next(); + ExtendedCell next = this.iter.next(); int ret = this.segment.getComparator().compare(next, seekCell); if (ret >= 0) { this.current = next; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 70edc5a82246..bab382defe7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -21,8 +21,8 @@ import java.util.Optional; import java.util.OptionalLong; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -42,12 +42,12 @@ public interface StoreFile { /** * Get the first key in this store file. */ - Optional getFirstKey(); + Optional getFirstKey(); /** * Get the last key in this store file. */ - Optional getLastKey(); + Optional getLastKey(); /** * Get the comparator for comparing two cells. 
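Note: SnapshotSegmentScanner, changed above, implements seek by recreating the segment iterator and delegating to reseek, which simply walks forward until it reaches the first cell that is not smaller than the seek key. Below is a self-contained sketch of that iterator-restart pattern over a sorted set of strings standing in for cells; the class and method names are illustrative, not the real HBase ones.

import java.util.Iterator;
import java.util.NavigableSet;
import java.util.TreeSet;

// Minimal model of an iterator-backed scanner: seek() restarts the iterator,
// reseek() only moves forward, mirroring the structure shown in the hunk above.
final class IteratorBackedScannerSketch {
  private final NavigableSet<String> segment;
  private Iterator<String> iter;
  private String current;

  IteratorBackedScannerSketch(NavigableSet<String> segment) {
    this.segment = segment;
    this.iter = segment.iterator();
    this.current = iter.hasNext() ? iter.next() : null;
  }

  String peek() {
    return current;
  }

  String next() {
    String old = current;
    current = iter.hasNext() ? iter.next() : null;
    return old;
  }

  boolean seek(String seekKey) {
    iter = segment.iterator(); // restart from the beginning of the snapshot
    return reseek(seekKey);
  }

  boolean reseek(String seekKey) {
    while (iter.hasNext()) {
      String next = iter.next();
      if (next.compareTo(seekKey) >= 0) { // first element not smaller than the key
        current = next;
        return true;
      }
    }
    current = null;
    return false;
  }

  public static void main(String[] args) {
    IteratorBackedScannerSketch s =
      new IteratorBackedScannerSketch(new TreeSet<>(java.util.List.of("a", "c", "e", "g")));
    System.out.println(s.seek("d") + " -> " + s.peek());   // true -> e
    System.out.println(s.reseek("f") + " -> " + s.peek()); // true -> g
  }
}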
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 7751df300e19..c6e1dfe01718 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -430,8 +431,8 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo * @return true if there is overlap, false otherwise */ public boolean passesKeyRangeFilter(Scan scan) { - Optional firstKeyKV = this.getFirstKey(); - Optional lastKeyKV = this.getLastKey(); + Optional firstKeyKV = this.getFirstKey(); + Optional lastKeyKV = this.getLastKey(); if (!firstKeyKV.isPresent() || !lastKeyKV.isPresent()) { // the file is empty return false; @@ -557,7 +558,7 @@ private void setDeleteFamilyBloomFilterFaulty() { this.deleteFamilyBloomFilter = null; } - public Optional getLastKey() { + public Optional getLastKey() { return reader.getLastKey(); } @@ -565,7 +566,7 @@ public Optional getLastRowKey() { return reader.getLastRowKey(); } - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { return reader.midKey(); } @@ -585,7 +586,7 @@ public long getDeleteFamilyCnt() { return deleteFamilyCnt; } - public Optional getFirstKey() { + public Optional getFirstKey() { return reader.getFirstKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index fd941de4df87..6e0824a16c62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -49,12 +50,12 @@ public class StoreFileScanner implements KeyValueScanner { // the reader it comes from: private final StoreFileReader reader; private final HFileScanner hfs; - private Cell cur = null; + private ExtendedCell cur = null; private boolean closed = false; private boolean realSeekDone; private boolean delayedReseek; - private Cell delayedSeekKV; + private ExtendedCell delayedSeekKV; private final boolean enforceMVCC; private final boolean hasMVCCInfo; @@ -193,13 +194,13 @@ public String toString() { } @Override - public Cell peek() { + public ExtendedCell peek() { return cur; } @Override - public Cell next() throws IOException { - Cell retKey = cur; + public ExtendedCell next() throws IOException { + ExtendedCell retKey = cur; try { // only seek if we aren't at the end. cur == null implies 'end'. 
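Note: in the StoreFileReader hunks below, passesKeyRangeFilter works against Optional first/last keys: an absent key means the store file is empty and can be skipped outright, otherwise the scan range is tested for overlap with the file's key range. The sketch here shows only that overlap test, on raw row keys with unsigned lexicographic comparison and inclusive bounds for simplicity; it is an illustration of the idea under those assumptions, not the actual StoreFileReader logic.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;

// Simplified overlap test between a scan range and a store file's key range.
final class KeyRangeFilterSketch {

  static boolean passesKeyRangeFilter(byte[] scanStartRow, byte[] scanStopRow,
      Optional<byte[]> fileFirstRow, Optional<byte[]> fileLastRow) {
    if (!fileFirstRow.isPresent() || !fileLastRow.isPresent()) {
      // the file is empty
      return false;
    }
    // Empty start/stop rows mean the scan is unbounded on that side.
    boolean startsBeforeFileEnd = scanStartRow.length == 0
      || Arrays.compareUnsigned(scanStartRow, fileLastRow.get()) <= 0;
    boolean stopsAfterFileStart = scanStopRow.length == 0
      || Arrays.compareUnsigned(scanStopRow, fileFirstRow.get()) >= 0;
    return startsBeforeFileEnd && stopsAfterFileStart;
  }

  private static byte[] row(String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    Optional<byte[]> first = Optional.of(row("row100"));
    Optional<byte[]> last = Optional.of(row("row500"));
    System.out.println(passesKeyRangeFilter(row("row200"), row("row300"), first, last)); // true
    System.out.println(passesKeyRangeFilter(row("row600"), row("row700"), first, last)); // false
    System.out.println(passesKeyRangeFilter(new byte[0], new byte[0], Optional.empty(), last)); // false
  }
}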
@@ -219,7 +220,7 @@ public Cell next() throws IOException { } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { if (seekCount != null) seekCount.increment(); try { @@ -248,7 +249,7 @@ public boolean seek(Cell key) throws IOException { } @Override - public boolean reseek(Cell key) throws IOException { + public boolean reseek(ExtendedCell key) throws IOException { if (seekCount != null) seekCount.increment(); try { @@ -275,7 +276,7 @@ public boolean reseek(Cell key) throws IOException { } } - protected void setCurrentCell(Cell newVal) throws IOException { + protected void setCurrentCell(ExtendedCell newVal) throws IOException { this.cur = newVal; if (this.cur != null && this.reader.isBulkLoaded() && !this.reader.isSkipResetSeqId()) { PrivateCellUtil.setSequenceId(cur, this.reader.getSequenceID()); @@ -315,7 +316,7 @@ public void close() { } /** Returns false if not found or if k is after the end. */ - public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { + public static boolean seekAtOrAfter(HFileScanner s, ExtendedCell k) throws IOException { int result = s.seekTo(k); if (result < 0) { if (result == HConstants.INDEX_KEY_MAGIC) { @@ -333,7 +334,7 @@ public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { return true; } - static boolean reseekAtOrAfter(HFileScanner s, Cell k) throws IOException { + static boolean reseekAtOrAfter(HFileScanner s, ExtendedCell k) throws IOException { // This function is similar to seekAtOrAfter function int result = s.reseekTo(k); if (result <= 0) { @@ -375,7 +376,8 @@ public long getScannerOrder() { * the next row/column and use OLDEST_TIMESTAMP in the seek key. */ @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) + throws IOException { if (kv.getFamilyLength() == 0) { useBloom = false; } @@ -498,7 +500,7 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) } @Override - public boolean seekToPreviousRow(Cell originalKey) throws IOException { + public boolean seekToPreviousRow(ExtendedCell originalKey) throws IOException { try { if (isFastSeekingEncoding) { return seekToPreviousRowStateless(originalKey); @@ -528,7 +530,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { private boolean seekToPreviousRowWithHint() throws IOException { do { // Using our existing seek hint, set our next seek hint - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow); seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); // Reseek back to our initial seek hint (i.e. 
what we think is the start of the @@ -560,13 +562,13 @@ private boolean seekToPreviousRowWithHint() throws IOException { */ private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOException { // Rewind to the cell before the beginning of this row - Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey); + ExtendedCell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey); if (!seekBefore(keyAtBeginningOfRow)) { return false; } // Rewind before this row and save what we find as a seek hint - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); // Seek back to the start of the previous row @@ -598,15 +600,15 @@ private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOExceptio * It should be used if the cost for seeking is lower i.e. when using a fast seeking data block * encoding like RIV1. */ - private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException { - Cell key = originalKey; + private boolean seekToPreviousRowStateless(ExtendedCell originalKey) throws IOException { + ExtendedCell key = originalKey; do { - Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key); + ExtendedCell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key); if (!seekBefore(keyAtBeginningOfRow)) { return false; } - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); if (!seekAtOrAfter(firstKeyOfPreviousRow)) { return false; } @@ -618,7 +620,7 @@ private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException } while (true); } - private boolean seekBefore(Cell seekKey) throws IOException { + private boolean seekBefore(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -638,7 +640,7 @@ private boolean seekBefore(Cell seekKey) throws IOException { * being null again via this method, that's because there doesn't exist a row before the seek * target in the storefile (i.e. 
we're at the beginning of the storefile) */ - private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException { + private void seekBeforeAndSaveKeyToPreviousRow(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -653,7 +655,7 @@ private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException } } - private boolean seekAtOrAfter(Cell seekKey) throws IOException { + private boolean seekAtOrAfter(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -665,7 +667,7 @@ private boolean seekAtOrAfter(Cell seekKey) throws IOException { return true; } - private boolean reseekAtOrAfter(Cell seekKey) throws IOException { + private boolean reseekAtOrAfter(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -700,7 +702,7 @@ public boolean seekToLastRow() throws IOException { if (!lastRow.isPresent()) { return false; } - Cell seekKey = PrivateCellUtil.createFirstOnRow(lastRow.get()); + ExtendedCell seekKey = PrivateCellUtil.createFirstOnRow(lastRow.get()); if (seek(seekKey)) { return true; } else { @@ -709,7 +711,7 @@ public boolean seekToLastRow() throws IOException { } @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { seek(key); if (cur == null || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); @@ -718,7 +720,7 @@ public boolean backwardSeek(Cell key) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return hfs.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 67fa2244e957..c2a9b839c821 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -105,15 +106,15 @@ public class StoreFileWriter implements CellSink, ShipperListener { private final boolean shouldDropCacheBehind; private final Supplier> compactedFilesSupplier; private final CellComparator comparator; - private Cell lastCell; + private ExtendedCell lastCell; // The first (latest) delete family marker of the current row - private Cell deleteFamily; + private ExtendedCell deleteFamily; // The list of delete family version markers of the current row - private List deleteFamilyVersionList = new ArrayList<>(); + private List deleteFamilyVersionList = new ArrayList<>(); // The first (latest) delete column marker of the current column - private Cell deleteColumn; + private ExtendedCell deleteColumn; // The list of delete column version markers of the current column - private List deleteColumnVersionList = new ArrayList<>(); + private List deleteColumnVersionList = new ArrayList<>(); // The live put cell count for the current column private int livePutCellCount; private final int maxVersions; @@ -344,14 +345,14 @@ private void initColumnState() { } - private boolean isDeletedByDeleteFamily(Cell cell) { + 
private boolean isDeletedByDeleteFamily(ExtendedCell cell) { return deleteFamily != null && (deleteFamily.getTimestamp() > cell.getTimestamp() || (deleteFamily.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteFamily.getSequenceId()))); } - private boolean isDeletedByDeleteFamilyVersion(Cell cell) { - for (Cell deleteFamilyVersion : deleteFamilyVersionList) { + private boolean isDeletedByDeleteFamilyVersion(ExtendedCell cell) { + for (ExtendedCell deleteFamilyVersion : deleteFamilyVersionList) { if ( deleteFamilyVersion.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteFamilyVersion.getSequenceId()) @@ -362,14 +363,14 @@ private boolean isDeletedByDeleteFamilyVersion(Cell cell) { return false; } - private boolean isDeletedByDeleteColumn(Cell cell) { + private boolean isDeletedByDeleteColumn(ExtendedCell cell) { return deleteColumn != null && (deleteColumn.getTimestamp() > cell.getTimestamp() || (deleteColumn.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteColumn.getSequenceId()))); } - private boolean isDeletedByDeleteColumnVersion(Cell cell) { - for (Cell deleteColumnVersion : deleteColumnVersionList) { + private boolean isDeletedByDeleteColumnVersion(ExtendedCell cell) { + for (ExtendedCell deleteColumnVersion : deleteColumnVersionList) { if ( deleteColumnVersion.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteColumnVersion.getSequenceId()) @@ -380,12 +381,12 @@ private boolean isDeletedByDeleteColumnVersion(Cell cell) { return false; } - private boolean isDeleted(Cell cell) { + private boolean isDeleted(ExtendedCell cell) { return isDeletedByDeleteFamily(cell) || isDeletedByDeleteColumn(cell) || isDeletedByDeleteFamilyVersion(cell) || isDeletedByDeleteColumnVersion(cell); } - private void appendCell(Cell cell) throws IOException { + private void appendCell(ExtendedCell cell) throws IOException { if ((lastCell == null || !CellUtil.matchingColumn(lastCell, cell))) { initColumnState(); } @@ -458,11 +459,11 @@ private void appendCell(Cell cell) throws IOException { } @Override - public void appendAll(List cellList) throws IOException { + public void appendAll(List cellList) throws IOException { if (historicalFilePath == null) { // The dual writing is not enabled and all cells are written to one file. We use // the live version file in this case - for (Cell cell : cellList) { + for (ExtendedCell cell : cellList) { liveFileWriter.append(cell); } return; @@ -474,13 +475,13 @@ public void appendAll(List cellList) throws IOException { // It is a new row and thus time to reset the state initRowState(); } - for (Cell cell : cellList) { + for (ExtendedCell cell : cellList) { appendCell(cell); } } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { if (historicalFilePath == null) { // The dual writing is not enabled and all cells are written to one file. We use // the live version file in this case @@ -675,14 +676,14 @@ private void appendTrackedTimestampsToMetadata() throws IOException { * Record the earlest Put timestamp. 
If the timeRangeTracker is not set, update TimeRangeTracker * to include the timestamp of this key */ - private void trackTimestamps(final Cell cell) { + private void trackTimestamps(final ExtendedCell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); } timeRangeTracker.includeTimestamp(cell); } - private void appendGeneralBloomfilter(final Cell cell) throws IOException { + private void appendGeneralBloomfilter(final ExtendedCell cell) throws IOException { if (this.generalBloomFilterWriter != null) { /* * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue. @@ -694,7 +695,7 @@ private void appendGeneralBloomfilter(final Cell cell) throws IOException { } } - private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { + private void appendDeleteFamilyBloomFilter(final ExtendedCell cell) throws IOException { if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { return; } @@ -706,7 +707,7 @@ private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { } } - private void append(final Cell cell) throws IOException { + private void append(final ExtendedCell cell) throws IOException { appendGeneralBloomfilter(cell); appendDeleteFamilyBloomFilter(cell); writer.append(cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index 7aae769fda96..c0efe4074a28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -24,7 +24,7 @@ import java.util.function.Consumer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateConstants; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -120,7 +120,7 @@ protected void performFlush(InternalScanner scanner, CellSink sink, ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List kvs = new ArrayList<>(); + List kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush @@ -131,9 +131,12 @@ protected void performFlush(InternalScanner scanner, CellSink sink, } try { do { - hasMore = scanner.next(kvs, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = scanner.next((List) kvs, scannerContext); if (!kvs.isEmpty()) { - for (Cell c : kvs) { + for (ExtendedCell c : kvs) { sink.append(c); if (control) { throughputController.control(flushName, c.getSerializedSize()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 89d4aa34e78c..7aa17d3233a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -108,7 +109,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * seeking to next row/column. TODO: estimate them? */ private long kvsScanned = 0; - private Cell prevCell = null; + private ExtendedCell prevCell = null; private final long preadMaxBytes; private long bytesRead; @@ -399,7 +400,7 @@ boolean isScanUsePread() { * @param isLazy true if using lazy seek * @param isParallelSeek true if using parallel seek */ - protected void seekScanners(List scanners, Cell seekKey, + protected void seekScanners(List scanners, ExtendedCell seekKey, boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). @@ -482,7 +483,7 @@ protected List selectScannersFrom(HStore store, } @Override - public Cell peek() { + public ExtendedCell peek() { return heap != null ? heap.peek() : null; } @@ -534,7 +535,7 @@ private void close(boolean withDelayedScannersClose) { } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -562,7 +563,7 @@ public boolean next(List outResult, ScannerContext scannerContext) throws return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } - Cell cell = this.heap.peek(); + ExtendedCell cell = this.heap.peek(); if (cell == null) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); @@ -644,7 +645,17 @@ public boolean next(List outResult, ScannerContext scannerContext) throws case INCLUDE_AND_SEEK_NEXT_COL: Filter f = matcher.getFilter(); if (f != null) { - cell = f.transformCell(cell); + Cell transformedCell = f.transformCell(cell); + // fast path, most filters just return the same cell instance + if (transformedCell != cell) { + if (transformedCell instanceof ExtendedCell) { + cell = (ExtendedCell) transformedCell; + } else { + throw new DoNotRetryIOException("Incorrect filter implementation, " + + "the Cell returned by transformCell is not an ExtendedCell. Filter class: " + + f.getClass().getName()); + } + } } this.countPerRow++; @@ -752,7 +763,7 @@ public boolean next(List outResult, ScannerContext scannerContext) throws break; case SEEK_NEXT_USING_HINT: - Cell nextKV = matcher.getNextKeyHint(cell); + ExtendedCell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); if ( @@ -826,7 +837,7 @@ private NextState needToReturn(List outResult) { return null; } - private void seekOrSkipToNextRow(Cell cell) throws IOException { + private void seekOrSkipToNextRow(ExtendedCell cell) throws IOException { // If it is a Get Scan, then we know that we are done with this row; there are no more // rows beyond the current one: don't try to optimize. 
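Note: the reworked transformCell handling above keeps the common case cheap: most filters hand back the same cell instance, so an identity comparison avoids any further checks, and only a filter that actually builds a replacement cell must return an ExtendedCell, otherwise the scan fails fast (the real code throws DoNotRetryIOException). The sketch below reproduces that decision in isolation with stand-in types and a plain IllegalStateException; the Filter interface here is a placeholder, not the real org.apache.hadoop.hbase.filter.Filter.

// Stand-in types for illustration only.
interface Cell {}

interface ExtendedCell extends Cell {}

interface FilterSketch {
  Cell transformCell(Cell cell);
}

final class TransformCellNarrowingSketch {

  static ExtendedCell applyTransform(FilterSketch filter, ExtendedCell cell) {
    Cell transformed = filter.transformCell(cell);
    // Fast path: most filters return the very same instance, no type check needed.
    if (transformed == cell) {
      return cell;
    }
    // A filter that builds a new cell must still produce an ExtendedCell, because
    // everything downstream in the store scanner relies on the server-side type.
    if (transformed instanceof ExtendedCell) {
      return (ExtendedCell) transformed;
    }
    throw new IllegalStateException("Incorrect filter implementation, transformCell returned "
      + transformed.getClass().getName() + " which is not an ExtendedCell");
  }
}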
if (!get) { @@ -837,7 +848,7 @@ private void seekOrSkipToNextRow(Cell cell) throws IOException { seekToNextRow(cell); } - private void seekOrSkipToNextColumn(Cell cell) throws IOException { + private void seekOrSkipToNextColumn(ExtendedCell cell) throws IOException { if (!trySkipToNextColumn(cell)) { seekAsDirection(matcher.getKeyForNextColumn(cell)); } @@ -894,13 +905,13 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException { * @param cell current cell * @return true means skip to next row, false means not */ - protected boolean trySkipToNextRow(Cell cell) throws IOException { - Cell nextCell = null; + protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException { + ExtendedCell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison // when the identity changes we need to compare the bytes again - Cell previousIndexedKey = null; + ExtendedCell previousIndexedKey = null; do { - Cell nextIndexedKey = getNextIndexedKey(); + ExtendedCell nextIndexedKey = getNextIndexedKey(); if ( nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey @@ -917,17 +928,17 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { } /** - * See {@link org.apache.hadoop.hbase.regionserver.StoreScanner#trySkipToNextRow(Cell)} + * See {@link #trySkipToNextRow(ExtendedCell)} * @param cell current cell * @return true means skip to next column, false means not */ - protected boolean trySkipToNextColumn(Cell cell) throws IOException { - Cell nextCell = null; + protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { + ExtendedCell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison // when the identity changes we need to compare the bytes again - Cell previousIndexedKey = null; + ExtendedCell previousIndexedKey = null; do { - Cell nextIndexedKey = getNextIndexedKey(); + ExtendedCell nextIndexedKey = getNextIndexedKey(); if ( nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey @@ -1023,7 +1034,7 @@ public void updateReaders(List sfs, List memStoreSc /** Returns if top of heap has changed (and KeyValueHeap has to try the next KV) */ protected final boolean reopenAfterFlush() throws IOException { // here we can make sure that we have a Store instance so no null check on store. - Cell lastTop = heap.peek(); + ExtendedCell lastTop = heap.peek(); // When we have the scan object, should we not pass it to getScanners() to get a limited set of // scanners? We did so in the constructor and we could have done it now by storing the scan // object from the constructor @@ -1068,11 +1079,11 @@ protected final boolean reopenAfterFlush() throws IOException { return topChanged; } - private void resetQueryMatcher(Cell lastTopKey) { + private void resetQueryMatcher(ExtendedCell lastTopKey) { // Reset the state of the Query Matcher and set to top row. // Only reset and call setRow if the row changes; avoids confusing the // query matcher if scanning intra-row. 
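Note: trySkipToNextRow and trySkipToNextColumn above implement a skip-versus-seek heuristic: when the next indexed key (roughly, the first key of the next HFile block) shows that the target row or column still lies inside the current block, it is cheaper to keep calling next() and let the matcher skip cells than to issue a reseek. The sketch below models that decision over a plain sorted list of integers, where the "row" of a cell is value / 10; it is a toy illustration of the heuristic, not the actual StoreScanner code.

import java.util.List;

// Toy model: cells are integers, the row of a cell is value / 10, and
// nextIndexedKey marks where the next block starts.
final class SkipVsSeekSketch {

  static int positionAtNextRow(List<Integer> cells, int pos, int nextIndexedKey) {
    int currentRow = cells.get(pos) / 10;
    int firstOfNextRow = (currentRow + 1) * 10;
    if (firstOfNextRow < nextIndexedKey) {
      // Cheap path: the next row begins inside the current block, just skip forward.
      while (pos < cells.size() && cells.get(pos) / 10 == currentRow) {
        pos++;
      }
      return pos;
    }
    // Expensive path: emulate a reseek with a binary search for firstOfNextRow.
    int lo = pos, hi = cells.size();
    while (lo < hi) {
      int mid = (lo + hi) >>> 1;
      if (cells.get(mid) < firstOfNextRow) {
        lo = mid + 1;
      } else {
        hi = mid;
      }
    }
    return lo;
  }

  public static void main(String[] args) {
    List<Integer> cells = List.of(10, 11, 12, 20, 21, 30, 31, 32, 40);
    // Next block starts at 35: row 2x begins before it, so we skip linearly.
    System.out.println(positionAtNextRow(cells, 0, 35)); // 3
    // Next block starts at 15: row 2x is beyond this block, so we fall back to a "seek".
    System.out.println(positionAtNextRow(cells, 0, 15)); // 3, found by binary search
  }
}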
- Cell cell = heap.peek(); + ExtendedCell cell = heap.peek(); if (cell == null) { cell = lastTopKey; } @@ -1093,7 +1104,7 @@ protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } - protected boolean seekToNextRow(Cell c) throws IOException { + protected boolean seekToNextRow(ExtendedCell c) throws IOException { return reseek(PrivateCellUtil.createLastOnRow(c)); } @@ -1101,12 +1112,12 @@ protected boolean seekToNextRow(Cell c) throws IOException { * Do a reseek in a normal StoreScanner(scan forward) * @return true if scanner has values left, false if end of scanner */ - protected boolean seekAsDirection(Cell kv) throws IOException { + protected boolean seekAsDirection(ExtendedCell kv) throws IOException { return reseek(kv); } @Override - public boolean reseek(Cell kv) throws IOException { + public boolean reseek(ExtendedCell kv) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -1126,7 +1137,7 @@ void trySwitchToStreamRead() { LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, this.store.getColumnFamilyName()); scanUsePread = false; - Cell lastTop = heap.peek(); + ExtendedCell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); List scannersToClose = new ArrayList<>(); for (KeyValueScanner kvs : currentScanners) { @@ -1192,7 +1203,7 @@ protected final boolean checkFlushed() { * @param scanners the list {@link KeyValueScanner}s to be read from * @param kv the KeyValue on which the operation is being requested */ - private void parallelSeek(final List scanners, final Cell kv) + private void parallelSeek(final List scanners, final ExtendedCell kv) throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); @@ -1245,7 +1256,7 @@ public long getEstimatedNumberOfKvsScanned() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return this.heap.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 98bb68f31fb0..c9ee019e9afe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompoundConfiguration; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -121,7 +122,7 @@ static Optional getFileSplitPoint(HStoreFile file, CellComparator compar // Get first, last, and mid keys. Midkey is the key that starts block // in middle of hfile. Has column and timestamp. Need to return just // the row we want to split on as midkey. 
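Note: parallelSeek, changed above, fans the same seek key out to all scanners and waits for every one of them to finish; the per-scanner work is handed to ParallelSeekHandler (changed later in this patch), which reports completion through a CountDownLatch. The sketch below shows that coordination pattern with an ExecutorService and a latch; the scanner interface and error handling are simplified stand-ins, not the real HBase executor wiring.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

// Minimal model of fanning a seek out to several scanners in parallel and
// blocking until all of them have finished.
final class ParallelSeekSketch {

  interface SeekableScanner {
    void seek(byte[] key) throws IOException;
  }

  static void parallelSeek(ExecutorService pool, List<SeekableScanner> scanners, byte[] key)
      throws IOException, InterruptedException {
    if (scanners.isEmpty()) {
      return;
    }
    CountDownLatch latch = new CountDownLatch(scanners.size());
    AtomicReference<Throwable> firstError = new AtomicReference<>();
    for (SeekableScanner scanner : scanners) {
      pool.execute(() -> {
        try {
          scanner.seek(key);
        } catch (Throwable t) {
          firstError.compareAndSet(null, t); // remember the first failure
        } finally {
          latch.countDown();                 // always count down, even on error
        }
      });
    }
    latch.await();                           // wait until every seek has completed
    if (firstError.get() != null) {
      throw new IOException("parallel seek failed", firstError.get());
    }
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<SeekableScanner> scanners = List.of(
      key -> System.out.println("scanner A sought"),
      key -> System.out.println("scanner B sought"));
    parallelSeek(pool, scanners, new byte[] { 1 });
    pool.shutdown();
  }
}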
- Optional optionalMidKey = reader.midKey(); + Optional optionalMidKey = reader.midKey(); if (!optionalMidKey.isPresent()) { return Optional.empty(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 386f64166ef4..cd7c63b8270c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -167,7 +168,7 @@ public BoundaryMultiWriter(CellComparator comparator, List targetBoundar } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { if (currentWriter == null && existingWriters.isEmpty()) { // First append ever, do a sanity check. sanityCheckLeft(this.boundaries.get(0), cell); @@ -292,7 +293,7 @@ public SizeMultiWriter(CellComparator comparator, int targetCount, long targetKv } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { // If we are waiting for opportunity to close and we started writing different row, // discard the writer and stop waiting. boolean doCreateWriter = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 51807658f2a8..c0647ec01a05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -23,7 +23,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; @@ -103,7 +103,7 @@ public static TimeRangeTracker create(Type type, long minimumTimestamp, long max * of the key. 
* @param cell the Cell to include */ - public void includeTimestamp(final Cell cell) { + public void includeTimestamp(final ExtendedCell cell) { includeTimestamp(cell.getTimestamp()); if (PrivateCellUtil.isDeleteColumnOrFamily(cell)) { includeTimestamp(0); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index e58c53c355f4..055ad85e5a39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.PrivateConstants; @@ -422,7 +423,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long bytesWrittenProgressForShippedCall = 0; // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); long currentTime = EnvironmentEdgeManager.currentTime(); long lastMillis = 0; if (LOG.isDebugEnabled()) { @@ -443,7 +444,10 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); try { do { - hasMore = scanner.next(cells, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = scanner.next((List) cells, scannerContext); currentTime = EnvironmentEdgeManager.currentTime(); if (LOG.isDebugEnabled()) { now = currentTime; @@ -455,7 +459,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel // output to writer: Cell lastCleanCell = null; long lastCleanCellSeqId = 0; - for (Cell c : cells) { + for (ExtendedCell c : cells) { if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) { lastCleanCell = c; lastCleanCellSeqId = c.getSequenceId(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java index 41fb3e7bf12b..1bc799b9824d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.concurrent.CountDownLatch; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; @@ -34,12 +34,12 @@ public class ParallelSeekHandler extends EventHandler { private static final Logger LOG = LoggerFactory.getLogger(ParallelSeekHandler.class); private KeyValueScanner scanner; - private Cell keyValue; + private ExtendedCell keyValue; private long readPoint; private CountDownLatch latch; private Throwable 
err = null; - public ParallelSeekHandler(KeyValueScanner scanner, Cell keyValue, long readPoint, + public ParallelSeekHandler(KeyValueScanner scanner, ExtendedCell keyValue, long readPoint, CountDownLatch latch) { super(null, EventType.RS_PARALLEL_SEEK); this.scanner = scanner; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java index db4d80508626..5d279461bc56 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; import org.apache.yetus.audience.InterfaceAudience; @@ -53,25 +53,26 @@ public interface ColumnTracker extends ShipperListener { /** * Checks if the column is present in the list of requested columns by returning the match code * instance. It does not check against the number of versions for the columns asked for. To do the - * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} method based - * on the return type (INCLUDE) of this method. The values that can be returned by this method are - * {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and {@link MatchCode#SEEK_NEXT_ROW}. + * version check, one has to call {@link #checkVersions(ExtendedCell, long, byte, boolean)} method + * based on the return type (INCLUDE) of this method. The values that can be returned by this + * method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and + * {@link MatchCode#SEEK_NEXT_ROW}. * @param cell a cell with the column to match against * @param type The type of the Cell * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException; + ScanQueryMatcher.MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException; /** * Keeps track of the number of versions for the columns asked for. It assumes that the user has * already checked if the cell needs to be included by calling the - * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method are + * {@link #checkColumn(ExtendedCell, byte)} method. The enum values returned by this method are * {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} * and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. Implementations which include all the columns - * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(Cell, byte)} method and - * perform all the operations in this checkVersions method. + * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(ExtendedCell, byte)} + * method and perform all the operations in this checkVersions method. * @param cell a cell with the column to match against * @param timestamp The timestamp of the cell. 
* @param type the type of the key value (Put/Delete) @@ -82,7 +83,7 @@ public interface ColumnTracker extends ShipperListener { * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException; /** @@ -106,7 +107,7 @@ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, /** * Retrieve the MatchCode for the next row or column */ - MatchCode getNextRowOrNextColumn(Cell cell); + MatchCode getNextRowOrNextColumn(ExtendedCell cell); /** * Give the tracker a chance to declare it's done based on only the timestamp to allow an early @@ -120,6 +121,6 @@ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, * this information from external filters or timestamp range and we then need to indicate this * information to tracker. It is currently implemented for ExplicitColumnTracker. */ - default void doneWithColumn(Cell cell) { + default void doneWithColumn(ExtendedCell cell) { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java index 9a4361a956aa..9ac85bfb3b7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -70,7 +70,7 @@ public boolean isUserScan() { } @Override - public boolean moreRowsMayExistAfter(Cell cell) { + public boolean moreRowsMayExistAfter(ExtendedCell cell) { return true; } @@ -81,7 +81,7 @@ public Filter getFilter() { } @Override - public Cell getNextKeyHint(Cell cell) throws IOException { + public ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException { // no filter, so no key hint. return null; } @@ -91,7 +91,7 @@ protected void reset() { deletes.reset(); } - protected final void trackDelete(Cell cell) { + protected final void trackDelete(ExtendedCell cell) { // If keepDeletedCells is true, then we only remove cells by versions or TTL during // compaction, so we do not need to track delete here. 
// If keepDeletedCells is TTL and the delete marker is expired, then we can make sure that the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index 56ac265dd187..53816e9f3f34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.yetus.audience.InterfaceAudience; @@ -42,14 +42,14 @@ public interface DeleteTracker extends ShipperListener { * This is called when a Delete is encountered in a StoreFile. * @param cell - the delete cell */ - void add(Cell cell); + void add(ExtendedCell cell); /** * Check if the specified cell buffer has been deleted by a previously seen delete. * @param cell - current cell to check if deleted by a previously seen delete * @return deleteResult The result tells whether the Cell is deleted and why */ - DeleteResult isDeleted(Cell cell); + DeleteResult isDeleted(ExtendedCell cell); /** Returns true if there are no current delete, false otherwise */ boolean isEmpty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java index 397e2631a440..ada4e31fb9bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -60,7 +60,7 @@ protected DropDeletesCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker this.earliestPutTs = earliestPutTs; } - protected final MatchCode tryDropDelete(Cell cell) { + protected final MatchCode tryDropDelete(ExtendedCell cell) { long timestamp = cell.getTimestamp(); // If it is not the time to drop the delete marker, just return if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index 1ce2c6136cc2..ec9810ef9753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -19,8 +19,8 @@ import java.io.IOException; import java.util.NavigableSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; @@ -102,7 +102,7 @@ public ColumnCount getColumnHint() { * {@inheritDoc} */ @Override - public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) { + public ScanQueryMatcher.MatchCode checkColumn(ExtendedCell cell, byte type) { // delete markers should never be passed to an // *Explicit*ColumnTracker assert !PrivateCellUtil.isDelete(type); @@ -152,7 +152,7 @@ public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) { } @Override - public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + public ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); if (ignoreCount) { @@ -210,7 +210,7 @@ private boolean isExpired(long timestamp) { } @Override - public void doneWithColumn(Cell cell) { + public void doneWithColumn(ExtendedCell cell) { while (this.column != null) { int compare = CellUtil.compareQualifiers(cell, column.getBuffer(), column.getOffset(), column.getLength()); @@ -232,7 +232,7 @@ public void doneWithColumn(Cell cell) { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { doneWithColumn(cell); if (getColumnHint() == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java index c6776a05a41d..547d2e0673e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -34,7 +34,7 @@ public IncludeAllCompactionQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { return MatchCode.INCLUDE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java index 7d3d973779c8..9be9e6d91798 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -36,7 +36,7 @@ public MajorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode 
match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index 70e474e106b8..847eff44b318 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +35,7 @@ public MinorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java index 146f67dbd2fb..820bb6baa7b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java @@ -26,9 +26,9 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; @@ -127,7 +127,7 @@ protected DeleteVersionsNode() { this(Long.MIN_VALUE, Long.MAX_VALUE); } - public void addVersionDelete(Cell cell) { + public void addVersionDelete(ExtendedCell cell) { SortedSet set = deletesMap.get(cell.getTimestamp()); if (set == null) { set = new TreeSet<>(); @@ -161,7 +161,7 @@ protected DeleteVersionsNode getDeepCopy() { * @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return * MAX_VALUE. */ - protected long prepare(Cell cell) { + protected long prepare(ExtendedCell cell) { if (isColumnQualifierChanged(cell)) { // The last cell is family-level delete and this is not, or the cq is changed, // we should construct delColMap as a deep copy of delFamMap. 
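Note: as a rough illustration of the bookkeeping NewVersionBehaviorTracker's DeleteVersionsNode performs, the standalone sketch below (not part of this patch) indexes version deletes by timestamp and sequence id using plain JDK collections; the class and method names are invented for the example.

import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class VersionDeleteIndexSketch {
  // timestamp of the delete marker -> sequence ids (mvcc) of the markers seen at that timestamp
  private final TreeMap<Long, SortedSet<Long>> deletesByTs = new TreeMap<>();

  void addVersionDelete(long timestamp, long sequenceId) {
    deletesByTs.computeIfAbsent(timestamp, k -> new TreeSet<>()).add(sequenceId);
  }

  /** Simplified rule: a put is masked if a delete with the same timestamp has a higher mvcc. */
  boolean isMasked(long putTimestamp, long putSequenceId) {
    SortedSet<Long> deletes = deletesByTs.get(putTimestamp);
    return deletes != null && !deletes.tailSet(putSequenceId).isEmpty();
  }

  public static void main(String[] args) {
    VersionDeleteIndexSketch idx = new VersionDeleteIndexSketch();
    idx.addVersionDelete(100L, 8L);             // delete version at ts=100, mvcc=8
    System.out.println(idx.isMasked(100L, 5L)); // true: put was written before the delete
    System.out.println(idx.isMasked(100L, 9L)); // false: put was written after the delete
  }
}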
@@ -186,7 +186,7 @@ protected long prepare(Cell cell) { return Long.MAX_VALUE; } - private boolean isColumnQualifierChanged(Cell cell) { + private boolean isColumnQualifierChanged(ExtendedCell cell) { if ( delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell)) @@ -199,7 +199,7 @@ private boolean isColumnQualifierChanged(Cell cell) { // DeleteTracker @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (Type.codeToType(type)) { @@ -231,7 +231,7 @@ public void add(Cell cell) { * @return We don't distinguish DeleteColumn and DeleteFamily. We only return code for column. */ @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long duplicateMvcc = prepare(cell); for (Map.Entry e : delColMap.tailMap(cell.getSequenceId()) @@ -281,7 +281,7 @@ public void update() { // ColumnTracker @Override - public MatchCode checkColumn(Cell cell, byte type) throws IOException { + public MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException { if (columns == null) { return MatchCode.INCLUDE; } @@ -305,7 +305,7 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { } @Override - public MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) + public MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); // We drop old version in #isDeleted, so here we won't SKIP because of versioning. But we should @@ -370,7 +370,7 @@ public ColumnCount getColumnHint() { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { // TODO maybe we can optimize. 
return MatchCode.SEEK_NEXT_COL; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java index 93288cba8cd4..9ad3c792345e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; @@ -55,7 +55,7 @@ public void beforeShipped() throws IOException { } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java index 180d2dd2ed31..dcffbb140ed0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +35,7 @@ protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker column } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 8fdee2da524e..efe09cce722f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -20,9 +20,9 @@ import java.io.IOException; import java.util.SortedSet; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; @@ -48,7 +48,7 @@ public class ScanDeleteTracker implements DeleteTracker { protected boolean hasFamilyStamp = false; protected long familyStamp = 0L; protected SortedSet familyVersionStamps = new TreeSet(); - protected Cell deleteCell = null; + protected ExtendedCell deleteCell = null; protected byte[] deleteBuffer = null; protected int deleteOffset = 0; protected int deleteLength = 0; @@ -67,7 +67,7 
@@ public ScanDeleteTracker(CellComparator comparator) { * @param cell - the delete cell */ @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { long timestamp = cell.getTimestamp(); byte type = cell.getTypeByte(); if (!hasFamilyStamp || timestamp > familyStamp) { @@ -99,7 +99,7 @@ public void add(Cell cell) { * @param cell - current cell to check if deleted by a previously seen delete */ @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long timestamp = cell.getTimestamp(); if (hasFamilyStamp && timestamp <= familyStamp) { return DeleteResult.FAMILY_DELETED; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 614465c1827f..dc3259f03d3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; @@ -116,7 +117,7 @@ public static enum MatchCode { protected final CellComparator rowComparator; /** Key to seek to in memstore and StoreFiles */ - protected final Cell startKey; + protected final ExtendedCell startKey; /** Keeps track of columns and versions */ protected final ColumnTracker columns; @@ -127,9 +128,9 @@ public static enum MatchCode { protected final long now; /** Row the query is on */ - protected Cell currentRow; + protected ExtendedCell currentRow; - protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns, + protected ScanQueryMatcher(ExtendedCell startKey, ScanInfo scanInfo, ColumnTracker columns, long oldestUnexpiredTS, long now) { this.rowComparator = scanInfo.getComparator(); this.startKey = startKey; @@ -139,7 +140,7 @@ protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker colum } /** Returns true if the cell is expired */ - private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, + private static boolean isCellTTLExpired(final ExtendedCell cell, final long oldestTimestamp, final long now) { // Look for a TTL tag first. Use it instead of the family setting if // found. If a cell has multiple TTLs, resolve the conflict by using the @@ -168,7 +169,7 @@ private static boolean isCellTTLExpired(final Cell cell, final long oldestTimest * Check before the delete logic. * @return null means continue. 
*/ - protected final MatchCode preCheck(Cell cell) { + protected final MatchCode preCheck(ExtendedCell cell) { if (currentRow == null) { // Since the curCell is null it means we are already sure that we have moved over to the next // row @@ -197,7 +198,7 @@ protected final MatchCode preCheck(Cell cell) { return null; } - protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { + protected final MatchCode checkDeleted(DeleteTracker deletes, ExtendedCell cell) { if (deletes.isEmpty() && !(deletes instanceof NewVersionBehaviorTracker)) { return null; } @@ -235,10 +236,10 @@ protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - public abstract MatchCode match(Cell cell) throws IOException; + public abstract MatchCode match(ExtendedCell cell) throws IOException; /** Returns the start key */ - public Cell getStartKey() { + public ExtendedCell getStartKey() { return startKey; } @@ -246,7 +247,7 @@ public Cell getStartKey() { public abstract boolean hasNullColumnInQuery(); /** Returns a cell represent the current row */ - public Cell currentRow() { + public ExtendedCell currentRow() { return currentRow; } @@ -262,7 +263,7 @@ public void clearCurrentRow() { /** * Set the row when there is change in row */ - public void setToNewRow(Cell currentRow) { + public void setToNewRow(ExtendedCell currentRow) { this.currentRow = currentRow; columns.reset(); reset(); @@ -275,16 +276,16 @@ public void setToNewRow(Cell currentRow) { * stopRow or we are scanning on row only because this Scan is for a Get, * etc. */ - public abstract boolean moreRowsMayExistAfter(Cell cell); + public abstract boolean moreRowsMayExistAfter(ExtendedCell cell); - public Cell getKeyForNextColumn(Cell cell) { + public ExtendedCell getKeyForNextColumn(ExtendedCell cell) { // We aren't sure whether any DeleteFamily cells exist, so we can't skip to next column. // TODO: Current way disable us to seek to next column quickly. Is there any better solution? 
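Note: ScanQueryMatcher.match(ExtendedCell) is the abstract hook whose MatchCode return value drives the scan. The standalone sketch below (not part of this patch) uses simplified stand-in types to show how a scan loop typically reacts to INCLUDE, SKIP/SEEK and DONE codes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MatchCodeLoopSketch {
  enum MatchCode { INCLUDE, SKIP, SEEK_NEXT_COL, SEEK_NEXT_ROW, DONE }

  interface Matcher {
    MatchCode match(String cell);
  }

  static List<String> scan(List<String> cells, Matcher matcher) {
    List<String> results = new ArrayList<>();
    for (String cell : cells) {
      MatchCode code = matcher.match(cell);
      if (code == MatchCode.DONE) {
        break;                     // the matcher decided the scan is finished
      }
      if (code == MatchCode.INCLUDE) {
        results.add(cell);         // keep this cell in the result
      }
      // SKIP / SEEK_NEXT_COL / SEEK_NEXT_ROW: a real StoreScanner would reseek;
      // this simplified loop just moves on to the next cell.
    }
    return results;
  }

  public static void main(String[] args) {
    List<String> data = Arrays.asList("r1/a", "r1/b", "r2/a");
    // Include everything in row r1, then stop at the first cell of the next row.
    System.out.println(scan(data, c -> c.startsWith("r1") ? MatchCode.INCLUDE : MatchCode.DONE));
  }
}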
// see HBASE-18471 for more details // see TestFromClientSide3#testScanAfterDeletingSpecifiedRow // see TestFromClientSide3#testScanAfterDeletingSpecifiedRowV2 if (cell.getQualifierLength() == 0) { - Cell nextKey = PrivateCellUtil.createNextOnRowCol(cell); + ExtendedCell nextKey = PrivateCellUtil.createNextOnRowCol(cell); if (nextKey != cell) { return nextKey; } @@ -306,7 +307,7 @@ public Cell getKeyForNextColumn(Cell cell) { * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ - public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { + public int compareKeyForNextRow(ExtendedCell nextIndexed, ExtendedCell currentCell) { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } @@ -316,7 +317,7 @@ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ - public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { + public int compareKeyForNextColumn(ExtendedCell nextIndexed, ExtendedCell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, @@ -335,7 +336,7 @@ public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { /** * Delegate to {@link Filter#getNextCellHint(Cell)}. If no filter, return {@code null}. */ - public abstract Cell getNextKeyHint(Cell cell) throws IOException; + public abstract ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException; @Override public void beforeShipped() throws IOException { @@ -347,7 +348,7 @@ public void beforeShipped() throws IOException { } } - protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) { + protected static ExtendedCell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) { return PrivateCellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index ea0afee21787..407cedd8b225 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -33,7 +33,7 @@ */ @InterfaceAudience.Private public class ScanWildcardColumnTracker implements ColumnTracker { - private Cell columnCell = null; + private ExtendedCell columnCell = null; private int currentCount = 0; private final int maxVersions; private final int minVersions; @@ -68,7 +68,7 @@ public ScanWildcardColumnTracker(int minVersion, int maxVersion, long oldestUnex * 
{@inheritDoc} This receives puts *and* deletes. */ @Override - public MatchCode checkColumn(Cell cell, byte type) throws IOException { + public MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException { return MatchCode.INCLUDE; } @@ -77,7 +77,7 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { * take the version of the previous put (so eventually all but the last can be reclaimed). */ @Override - public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + public ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { if (columnCell == null) { // first iteration. @@ -121,7 +121,7 @@ public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte + "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); } - private void resetCell(Cell columnCell) { + private void resetCell(ExtendedCell columnCell) { this.columnCell = columnCell; currentCount = 0; } @@ -192,7 +192,7 @@ public boolean done() { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { return MatchCode.SEEK_NEXT_COL; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java index 370164c8a0d5..a59d23d1664e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -50,7 +50,7 @@ public StripeCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index 6c3d002b0929..c07b91b77e68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -21,6 +21,8 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; @@ -57,9 +59,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { private int count = 0; - private Cell curColCell = null; + private ExtendedCell curColCell = null; - private static Cell createStartKey(Scan scan, ScanInfo scanInfo) { + 
private static ExtendedCell createStartKey(Scan scan, ScanInfo scanInfo) { if (scan.includeStartRow()) { return createStartKeyFromRow(scan.getStartRow(), scanInfo); } else { @@ -104,11 +106,19 @@ public Filter getFilter() { } @Override - public Cell getNextKeyHint(Cell cell) throws IOException { + public ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException { if (filter == null) { return null; } else { - return filter.getNextCellHint(cell); + Cell hint = filter.getNextCellHint(cell); + if (hint == null || hint instanceof ExtendedCell) { + return (ExtendedCell) hint; + } else { + throw new DoNotRetryIOException("Incorrect filter implementation, " + + "the Cell returned by getNextKeyHint is not an ExtendedCell. Filter class: " + + filter.getClass().getName()); + } + } } @@ -120,7 +130,7 @@ public void beforeShipped() throws IOException { } } - protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) + protected final MatchCode matchColumn(ExtendedCell cell, long timestamp, byte typeByte) throws IOException { int tsCmp = tr.compare(timestamp); if (tsCmp > 0) { @@ -187,7 +197,7 @@ protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) * INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW * */ - private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, + private final MatchCode mergeFilterResponse(ExtendedCell cell, MatchCode matchCode, ReturnCode filterResponse) { switch (filterResponse) { case SKIP: @@ -259,7 +269,7 @@ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, protected abstract boolean moreRowsMayExistsAfter(int cmpToStopRow); @Override - public boolean moreRowsMayExistAfter(Cell cell) { + public boolean moreRowsMayExistAfter(ExtendedCell cell) { // If a 'get' Scan -- we are doing a Get (every Get is a single-row Scan in implementation) -- // then we are looking at one row only, the one specified in the Get coordinate..so we know // for sure that there are no more rows on this Scan diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 4201dd07533d..754368f73f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -173,12 +174,15 @@ public EncryptedKvEncoder(OutputStream os, Encryptor encryptor) { } @Override - public void write(Cell cell) throws IOException { + public void write(Cell c) throws IOException { if (encryptor == null) { - super.write(cell); + super.write(c); return; } + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; + byte[] iv = nextIv(); encryptor.setIv(iv); encryptor.reset(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 84709cbc58dd..e6a20b0d0206 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -23,6 +23,7 @@ import java.io.OutputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -232,7 +233,9 @@ public CompressedKvEncoder(OutputStream out, CompressionContext compression) { } @Override - public void write(Cell cell) throws IOException { + public void write(Cell c) throws IOException { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // We first write the KeyValue infrastructure as VInts. StreamUtils.writeRawVInt32(out, KeyValueUtil.keyLength(cell)); StreamUtils.writeRawVInt32(out, cell.getValueLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 9555092e7206..b9459b38f5eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -22,6 +22,7 @@ import java.util.Objects; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -130,7 +131,7 @@ public ReturnCode filterCell(final Cell cell) { case CHECK_CELL_DEFAULT: { if ( authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) - || authManager.authorizeCell(user, table, cell, Permission.Action.READ) + || authManager.authorizeCell(user, table, (ExtendedCell) cell, Permission.Action.READ) ) { return ReturnCode.INCLUDE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 563470f9404d..d0c19d7cfcd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -469,12 +470,12 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce } } } else if (entry.getValue() instanceof List) { - List list = (List) entry.getValue(); + List list = (List) entry.getValue(); if (list == null || list.isEmpty()) { get.addFamily(col); } else { // In case of family delete, a Cell will be added into the list with Qualifier as null. 
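Note: the AccessController and VisibilityController changes in this area rebuild a cell with extra tags via PrivateCellUtil.createCell(ExtendedCell, List<Tag>). The standalone sketch below (not part of this patch) shows that technique in isolation; the tag type byte is an arbitrary demo value, not one of HBase's reserved tag types.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class TagRewriteExample {
  public static void main(String[] args) {
    ExtendedCell original = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("value"));

    // Carry over any existing tags, then append the new one.
    List<Tag> tags = new ArrayList<>(PrivateCellUtil.getTags(original));
    tags.add(new ArrayBackedTag((byte) 65, Bytes.toBytes("demo-metadata")));

    // createCell(cell, tags) wraps the original cell with the new tag list rather than
    // copying the key/value bytes.
    ExtendedCell tagged = PrivateCellUtil.createCell(original, tags);
    System.out.println("tags length = " + tagged.getTagsLength());
  }
}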
- for (Cell cell : list) { + for (ExtendedCell cell : list) { if ( cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode()) @@ -609,7 +610,9 @@ private static void addCellPermissions(final byte[] perms, Map> e : familyMap.entrySet()) { List newCells = Lists.newArrayList(); - for (Cell cell : e.getValue()) { + for (Cell c : e.getValue()) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, perms)); @@ -1747,7 +1750,8 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell // We have checked the ACL tag of mutation is not null. // So that the tags could not be empty. tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, mutation.getACL())); - return PrivateCellUtil.createCell(newCell, tags); + assert newCell instanceof ExtendedCell; + return PrivateCellUtil.createCell((ExtendedCell) newCell, tags); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 9d9f90765c72..830c360e61a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -503,7 +504,8 @@ public boolean evaluate(Cell cell) throws IOException { @Override public boolean evaluate(Cell cell) throws IOException { boolean visibilityTagPresent = false; - Iterator tagsItr = PrivateCellUtil.tagsIterator(cell); + assert cell instanceof ExtendedCell; + Iterator tagsItr = PrivateCellUtil.tagsIterator((ExtendedCell) cell); while (tagsItr.hasNext()) { boolean includeKV = true; Tag tag = tagsItr.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index c5aa902de3dd..0e98fd456aed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -300,7 +301,9 @@ public void preBatchMutate(ObserverContext c, boolean modifiedTagFound = false; Pair pair = new Pair<>(false, null); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - pair = 
checkForReservedVisibilityTagPresence(cellScanner.current(), pair); + Cell cell = cellScanner.current(); + assert cell instanceof ExtendedCell; + pair = checkForReservedVisibilityTagPresence((ExtendedCell) cell, pair); if (!pair.getFirst()) { // Don't disallow reserved tags if authorization is disabled if (authorizationEnabled) { @@ -338,21 +341,23 @@ public void preBatchMutate(ObserverContext c, } } if (visibilityTags != null) { - List updatedCells = new ArrayList<>(); + List updatedCells = new ArrayList<>(); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); + Cell ce = cellScanner.current(); + assert ce instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) ce; List tags = PrivateCellUtil.getTags(cell); if (modifiedTagFound) { // Rewrite the tags by removing the modified tags. removeReplicationVisibilityTag(tags); } tags.addAll(visibilityTags); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + ExtendedCell updatedCell = PrivateCellUtil.createCell(cell, tags); updatedCells.add(updatedCell); } m.getFamilyCellMap().clear(); // Clear and add new Cells to the Mutation. - for (Cell cell : updatedCells) { + for (ExtendedCell cell : updatedCells) { if (m instanceof Put) { Put p = (Put) m; p.add(cell); @@ -430,7 +435,7 @@ public void prePrepareTimeStampForDeleteVersion(ObserverContext checkForReservedVisibilityTagPresence(Cell cell, + private Pair checkForReservedVisibilityTagPresence(ExtendedCell cell, Pair pair) throws IOException { if (pair == null) { pair = new Pair<>(false, null); @@ -631,8 +636,8 @@ public List> postIncrementBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, (ExtendedCell) pair.getSecond()))); } return resultPairs; } @@ -643,13 +648,13 @@ public List> postAppendBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, (ExtendedCell) pair.getSecond()))); } return resultPairs; } - private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOException { + private Cell createNewCellWithTags(Mutation mutation, ExtendedCell newCell) throws IOException { List tags = Lists.newArrayList(); CellVisibility cellVisibility = null; try { @@ -983,7 +988,12 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell cell) throws IOException { List putVisTags = new ArrayList<>(); - Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); + Byte putCellVisTagsFormat = null; + if (cell instanceof ExtendedCell) { + putCellVisTagsFormat = + VisibilityUtils.extractVisibilityTags((ExtendedCell) cell, putVisTags); + } + if (putVisTags.isEmpty() && deleteCellVisTags.isEmpty()) { // Early out if there are no tags in the cell return ReturnCode.INCLUDE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index 
026a99796c9f..e822e663a508 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -28,8 +28,8 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker; @@ -56,7 +56,7 @@ private static class TagInfo { List tags; Byte format; - private TagInfo(Cell c) { + private TagInfo(ExtendedCell c) { tags = new ArrayList<>(); format = VisibilityUtils.extractVisibilityTags(c, tags); } @@ -98,7 +98,7 @@ protected VisibilityDeleteVersionsNode getDeepCopy() { } @Override - public void addVersionDelete(Cell cell) { + public void addVersionDelete(ExtendedCell cell) { SortedMap set = deletesMap.get(cell.getTimestamp()); if (set == null) { set = new TreeMap<>(); @@ -117,7 +117,7 @@ public void addVersionDelete(Cell cell) { } @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (KeyValue.Type.codeToType(type)) { @@ -143,7 +143,7 @@ public void add(Cell cell) { } } - private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { + private boolean tagMatched(ExtendedCell put, TagInfo delInfo) throws IOException { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); return putVisTags.isEmpty() == delInfo.tags.isEmpty() @@ -153,7 +153,7 @@ private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { } @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { try { long duplicateMvcc = prepare(cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index 5cffb51500a2..1b91ed718f61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeoutException; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -75,7 +76,9 @@ public boolean replicate(ReplicateContext replicateContext) { for (Entry entry : entries) { WALEdit newEdit = new WALEdit(); ArrayList cells = entry.getEdit().getCells(); - for (Cell cell : cells) { + for (Cell c : cells) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 59623ece1359..180bd3cc4eae 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; @@ -47,7 +47,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { /** * This tag is used for the DELETE cell which has no visibility label. */ - private static final List EMPTY_TAG = Collections.EMPTY_LIST; + private static final List EMPTY_TAG = Collections.emptyList(); // Its better to track the visibility tags in delete based on each type. Create individual // data structures for tracking each of them. This would ensure that there is no tracking based // on time and also would handle all cases where deletefamily or deletecolumns is specified with @@ -70,7 +70,7 @@ public VisibilityScanDeleteTracker(CellComparator comparator) { } @Override - public void add(Cell delCell) { + public void add(ExtendedCell delCell) { // Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); byte type = delCell.getTypeByte(); @@ -110,7 +110,7 @@ public void add(Cell delCell) { extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type)); } - private boolean extractDeleteCellVisTags(Cell delCell, Type type) { + private boolean extractDeleteCellVisTags(ExtendedCell delCell, Type type) { // If tag is present in the delete boolean hasVisTag = false; Byte deleteCellVisTagsFormat = null; @@ -178,7 +178,7 @@ private boolean extractDeleteCellVisTags(Cell delCell, Type type) { } @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long timestamp = cell.getTimestamp(); try { if (hasFamilyStamp) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index d450228ea3ef..e1975bc3b162 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -34,7 +34,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -202,7 +202,7 @@ public static List getScanLabelGenerators(Configuration conf * @param tags - the array that will be populated if visibility tags are present * @return The visibility tags serialization format */ - public static Byte extractVisibilityTags(Cell cell, List tags) { + public static Byte extractVisibilityTags(ExtendedCell cell, List tags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -225,7 +225,8 @@ public static Byte extractVisibilityTags(Cell cell, List tags) { * @return - the serailization format of 
the tag. Can be null if no tags are found or if there is * no visibility tag found */ - public static Byte extractAndPartitionTags(Cell cell, List visTags, List nonVisTags) { + public static Byte extractAndPartitionTags(ExtendedCell cell, List visTags, + List nonVisTags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -242,7 +243,7 @@ public static Byte extractAndPartitionTags(Cell cell, List visTags, List tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 24578417ef34..98e6631e3055 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -59,6 +59,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -825,7 +826,7 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, halfReader.getStoreFileScanner(false, false, false, Long.MAX_VALUE, 0, false)) { scanner.seek(KeyValue.LOWESTKEY); for (;;) { - Cell cell = scanner.next(); + ExtendedCell cell = scanner.next(); if (cell == null) { break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java index 94c58dde4e00..c9860159d00f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.yetus.audience.InterfaceAudience; @@ -44,7 +45,7 @@ public Cell getLastCell() { /** * Bloom information from the cell is retrieved */ - public void writeBloom(Cell cell) throws IOException { + public void writeBloom(ExtendedCell cell) throws IOException { // only add to the bloom filter on a new, unique key if (isNewKey(cell)) { sanityCheck(cell); @@ -52,7 +53,7 @@ public void writeBloom(Cell cell) throws IOException { } } - private void sanityCheck(Cell cell) throws IOException { + private void sanityCheck(ExtendedCell cell) throws IOException { if (this.getLastCell() != null) { if (comparator.compare(cell, this.getLastCell()) <= 0) { throw new IOException("Added a key not lexically larger than" + " previous. 
Current cell = " @@ -71,5 +72,5 @@ private void sanityCheck(Cell cell) throws IOException { * @param cell the cell to be verified * @return true if a new key else false */ - protected abstract boolean isNewKey(Cell cell); + protected abstract boolean isNewKey(ExtendedCell cell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index 5465c24540a1..6667b4235534 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -22,8 +22,8 @@ import java.util.Iterator; import java.util.List; import java.util.SortedSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner; import org.apache.yetus.audience.InterfaceAudience; @@ -32,36 +32,36 @@ */ @InterfaceAudience.Private public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { - final private Iterable data; + final private Iterable data; final CellComparator comparator; - private Iterator iter; - private Cell current; + private Iterator iter; + private ExtendedCell current; - public CollectionBackedScanner(SortedSet set) { + public CollectionBackedScanner(SortedSet set) { this(set, CellComparator.getInstance()); } - public CollectionBackedScanner(SortedSet set, CellComparator comparator) { + public CollectionBackedScanner(SortedSet set, CellComparator comparator) { this.comparator = comparator; data = set; init(); } - public CollectionBackedScanner(List list) { + public CollectionBackedScanner(List list) { this(list, CellComparator.getInstance()); } - public CollectionBackedScanner(List list, CellComparator comparator) { + public CollectionBackedScanner(List list, CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(CellComparator comparator, Cell... array) { + public CollectionBackedScanner(CellComparator comparator, ExtendedCell... 
array) { this.comparator = comparator; - List tmp = new ArrayList<>(array.length); + List tmp = new ArrayList<>(array.length); Collections.addAll(tmp, array); Collections.sort(tmp, comparator); data = tmp; @@ -76,13 +76,13 @@ private void init() { } @Override - public Cell peek() { + public ExtendedCell peek() { return current; } @Override - public Cell next() { - Cell oldCurrent = current; + public ExtendedCell next() { + ExtendedCell oldCurrent = current; if (iter.hasNext()) { current = iter.next(); } else { @@ -92,16 +92,16 @@ public Cell next() { } @Override - public boolean seek(Cell seekCell) { + public boolean seek(ExtendedCell seekCell) { // restart iterator iter = data.iterator(); return reseek(seekCell); } @Override - public boolean reseek(Cell seekCell) { + public boolean reseek(ExtendedCell seekCell) { while (iter.hasNext()) { - Cell next = iter.next(); + ExtendedCell next = iter.next(); int ret = comparator.compare(next, seekCell); if (ret >= 0) { current = next; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 9065ebf116b7..d58bcdac74d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -122,7 +123,7 @@ public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Ex HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create(); // Write any-old Cell... 
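Note: assuming the ExtendedCell-typed signatures introduced above, the standalone sketch below (not part of this patch) shows the usual test-side usage of CollectionBackedScanner: seed it with KeyValues, seek, and drain it with next().

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;

public class CollectionBackedScannerExample {
  public static void main(String[] args) throws Exception {
    List<ExtendedCell> cells = new ArrayList<>();
    for (String row : new String[] { "a", "b", "c" }) {
      cells.add(new KeyValue(Bytes.toBytes(row), Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("v-" + row)));
    }
    CollectionBackedScanner scanner = new CollectionBackedScanner(cells);
    // Position the scanner at row "b"; the constructor has already sorted the cells.
    scanner.seek(new KeyValue(Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("q")));
    // next() returns the current cell and advances, so this drains rows "b" and "c".
    for (ExtendedCell c = scanner.next(); c != null; c = scanner.next()) {
      System.out.println(Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength()));
    }
    scanner.close();
  }
}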
final byte[] rowKey = Bytes.toBytes("compressiontestkey"); - Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) + ExtendedCell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) .setValue(Bytes.toBytes("compressiontestval")).build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 0d24ef783762..c3eafa7c11d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -991,9 +992,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { HFile.Reader hf = null; try { hf = HFile.createReader(fs, hfile.getPath(), CacheConfig.DISABLED, true, getConf()); - Optional startKv = hf.getFirstKey(); + Optional startKv = hf.getFirstKey(); start = CellUtil.cloneRow(startKv.get()); - Optional endKv = hf.getLastKey(); + Optional endKv = hf.getLastKey(); end = CellUtil.cloneRow(endKv.get()); } catch (Exception ioe) { LOG.warn("Problem reading orphan file " + hfile + ", skipping"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java index 46aa6ece1bf5..38781eaef61c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +45,7 @@ public void addLastBloomKey(Writer writer) throws IOException { } @Override - protected boolean isNewKey(Cell cell) { + protected boolean isNewKey(ExtendedCell cell) { if (this.getLastCell() != null) { return !CellUtil.matchingRows(cell, this.getLastCell()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java index 140feb117d89..cb1bcef01f89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; 
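Note: the row-only isNewKey check above (RowBloomContext) and the row+column variant in the following hunk (RowColBloomContext) differ only in the CellUtil comparison used. The standalone sketch below (not part of this patch) contrasts the two checks; the helper names are invented for the example, and plain KeyValues serve as the ExtendedCell inputs.

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomNewKeyExample {
  // ROW bloom: a key is "new" when the row changes.
  static boolean isNewRowKey(ExtendedCell last, ExtendedCell current) {
    return last == null || !CellUtil.matchingRows(current, last);
  }

  // ROWCOL bloom: a key is "new" when the row or the column changes.
  static boolean isNewRowColKey(ExtendedCell last, ExtendedCell current) {
    return last == null || !CellUtil.matchingRowColumn(current, last);
  }

  public static void main(String[] args) {
    ExtendedCell q1 = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
      Bytes.toBytes("q1"), Bytes.toBytes("v"));
    ExtendedCell q2 = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
      Bytes.toBytes("q2"), Bytes.toBytes("v"));
    // Same row, different qualifier: only the ROWCOL bloom treats this as a new key.
    System.out.println(isNewRowKey(q1, q2));    // false
    System.out.println(isNewRowColKey(q1, q2)); // true
  }
}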
import org.apache.yetus.audience.InterfaceAudience; @@ -41,7 +41,7 @@ public RowColBloomContext(BloomFilterWriter generalBloomFilterWriter, CellCompar @Override public void addLastBloomKey(Writer writer) throws IOException { if (this.getLastCell() != null) { - Cell firstOnRow = PrivateCellUtil.createFirstOnRowCol(this.getLastCell()); + ExtendedCell firstOnRow = PrivateCellUtil.createFirstOnRowCol(this.getLastCell()); // This copy happens only once when the writer is closed byte[] key = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(firstOnRow); writer.appendFileInfo(LAST_BLOOM_KEY, key); @@ -49,7 +49,7 @@ public void addLastBloomKey(Writer writer) throws IOException { } @Override - protected boolean isNewKey(Cell cell) { + protected boolean isNewKey(ExtendedCell cell) { if (this.getLastCell() != null) { return !CellUtil.matchingRowColumn(cell, this.getLastCell()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java index 622735847508..dee0897fb4b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.yetus.audience.InterfaceAudience; @@ -39,7 +40,8 @@ public RowPrefixFixedLengthBloomContext(BloomFilterWriter bloomFilterWriter, this.prefixLength = prefixLength; } - public void writeBloom(Cell cell) throws IOException { + @Override + public void writeBloom(ExtendedCell cell) throws IOException { super.writeBloom(getRowPrefixCell(cell)); } @@ -47,7 +49,7 @@ public void writeBloom(Cell cell) throws IOException { * @param cell the cell * @return the new cell created by row prefix */ - private Cell getRowPrefixCell(Cell cell) { + private ExtendedCell getRowPrefixCell(ExtendedCell cell) { byte[] row = CellUtil.copyRow(cell); return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(row, 0, Math.min(prefixLength, row.length)).setType(Cell.Type.Put).build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 05f3d46d2313..b8f095eb03df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.MetaCellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -75,7 +76,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter, @Override void append(RegionEntryBuffer buffer) throws IOException { - Map familyCells = new HashMap<>(); + Map> familyCells = new HashMap<>(); Map familySeqIds = new HashMap<>(); boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME); // First iterate all Cells to find which column families are present 
and to stamp Cell with @@ -87,28 +88,29 @@ void append(RegionEntryBuffer buffer) throws IOException { if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { continue; } + // only ExtendedCell can set sequence id, so it is safe to cast it to ExtendedCell later. PrivateCellUtil.setSequenceId(cell, seqId); String familyName = Bytes.toString(CellUtil.cloneFamily(cell)); // comparator need to be specified for meta familyCells .computeIfAbsent(familyName, - key -> new CellSet( + key -> new CellSet<>( isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) - .add(cell); + .add((ExtendedCell) cell); familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId)); } } // Create a new hfile writer for each column family, write edits then close writer. String regionName = Bytes.toString(buffer.encodedRegionName); - for (Map.Entry cellsEntry : familyCells.entrySet()) { + for (Map.Entry> cellsEntry : familyCells.entrySet()) { String familyName = cellsEntry.getKey(); StoreFileWriter writer = createRecoveredHFileWriter(buffer.tableName, regionName, familySeqIds.get(familyName), familyName, isMetaTable); LOG.trace("Created {}", writer.getPath()); openingWritersNum.incrementAndGet(); try { - for (Cell cell : cellsEntry.getValue()) { + for (ExtendedCell cell : cellsEntry.getValue()) { writer.append(cell); } // Append the max seqid to hfile, used when recovery. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 0a68efe1d7b9..c387dfe13ac0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -267,7 +267,7 @@ public WALEdit add(Cell cell) { } @InterfaceAudience.Private - public WALEdit add(List cells) { + public WALEdit add(List cells) { if (cells == null || cells.isEmpty()) { return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index e4fc17896dd3..fe4d4ddac169 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -285,9 +286,10 @@ public void processFile(final Configuration conf, final Path p) throws IOExcepti // initialize list into which we will store atomic actions List> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { + assert cell instanceof ExtendedCell; // add atomic operation to txn - Map op = - new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + Map op = new HashMap<>( + toStringMap((ExtendedCell) cell, outputOnlyRowKey, rowPrefix, row, outputValues)); if (op.isEmpty()) { continue; } @@ -351,7 +353,7 @@ public static void printCell(PrintStream out, Map op, boolean ou out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, boolean printRowKeyOnly, + public static Map toStringMap(ExtendedCell cell, boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { 
Map stringMap = new HashMap<>(); String rowKey = @@ -393,7 +395,7 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly return stringMap; } - public static Map toStringMap(Cell cell) { + public static Map toStringMap(ExtendedCell cell) { return toStringMap(cell, false, null, null, false); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index e60d23d12065..60ba0f85c006 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -73,7 +73,7 @@ static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) { return w; } - static Cell createCell(final int i) { + static ExtendedCell createCell(final int i) { return createCell(i, HConstants.EMPTY_BYTE_ARRAY); } @@ -84,18 +84,18 @@ static Cell createCell(final int i) { * @param value Value to use * @return Created Cell. */ - static Cell createCell(final int i, final byte[] value) { + static ExtendedCell createCell(final int i, final byte[] value) { return createCell(format(i), value); } - static Cell createCell(final byte[] keyRow) { + static ExtendedCell createCell(final byte[] keyRow) { return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); } - static Cell createCell(final byte[] keyRow, final byte[] value) { + static ExtendedCell createCell(final byte[] keyRow, final byte[] value) { return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) @@ -465,7 +465,7 @@ void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, false); byte[] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); - Cell c = createCell(b); + ExtendedCell c = createCell(b); if (scanner.seekTo(c) != 0) { LOG.info("Nonexistent row: " + new String(b)); return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java index 2441a03a4b30..228cb66c00a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java @@ -35,12 +35,12 @@ public class TestTagRewriteCell { @Test public void testHeapSize() { - Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + ExtendedCell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(Bytes.toBytes("row")).setFamily(HConstants.EMPTY_BYTE_ARRAY) .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP) .setType(KeyValue.Type.Maximum.getCode()).setValue(Bytes.toBytes("value")).build(); final int fakeTagArrayLength = 10; - Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]); + ExtendedCell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]); // Get the heapSize before the internal tags array in trCell are nuked long 
trCellHeapSize = ((HeapSize) trCell).heapSize(); @@ -48,7 +48,7 @@ public void testHeapSize() { // Make another TagRewriteCell with the original TagRewriteCell // This happens on systems with more than one RegionObserver/Coproc loaded (such as // VisibilityController and AccessController) - Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]); + ExtendedCell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]); assertTrue( "TagRewriteCell containing a TagRewriteCell's heapsize should be " diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index 1b3a66ff171f..2c8f4d201870 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -153,7 +153,7 @@ protected List selectScannersFrom(HStore store, for (KeyValueScanner scanner : scanners) { newScanners.add(new DelegatingKeyValueScanner(scanner) { @Override - public boolean reseek(Cell key) throws IOException { + public boolean reseek(ExtendedCell key) throws IOException { if (ON.get()) { REQ_COUNT.incrementAndGet(); if (!THROW_ONCE.get() || REQ_COUNT.get() == 1) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 3a285a21f404..0954ece74a3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; @@ -179,7 +180,7 @@ public void testHalfScanner() throws IOException { w.close(); HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf); - Cell midKV = r.midKey().get(); + ExtendedCell midKV = r.midKey().get(); byte[] midkey = CellUtil.cloneRow(midKV); Reference bottom = new Reference(midkey, Reference.Range.bottom); @@ -228,7 +229,7 @@ public void testHalfScanner() throws IOException { assertNull(foundKeyValue); } - private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore, + private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, ExtendedCell seekBefore, CacheConfig cacheConfig) throws IOException { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build(); StoreFileInfo storeFileInfo = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index 5b3e5db6c2fd..6ae5a74ebe8d 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -225,7 +226,7 @@ public void testSeekingOnSample() throws IOException { for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); KeyValue midKv = sampleKv.get(sampleKv.size() / 2); - Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); + ExtendedCell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } LOG.info("Done"); @@ -278,7 +279,7 @@ public void testSeekingToOffHeapKeyValueInSample() throws IOException { for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); KeyValue midKv = sampleKv.get(sampleKv.size() / 2); - Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); + ExtendedCell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } LOG.info("Done"); @@ -392,7 +393,7 @@ public void testRowIndexWithTagsButNoTagsInCell() throws IOException { } private void checkSeekingConsistency(List encodedSeekers, - boolean seekBefore, Cell keyValue) { + boolean seekBefore, ExtendedCell keyValue) { Cell expectedKeyValue = null; ByteBuffer expectedKey = null; ByteBuffer expectedValue = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index 0b8e3ec77ae4..7dea5b91c79b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -163,7 +164,7 @@ public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException { KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv4); - Cell toSeek = PrivateCellUtil.createLastOnRow(kv3); + ExtendedCell toSeek = PrivateCellUtil.createLastOnRow(kv3); seekToTheKey(kv3, sampleKv, toSeek); } @@ -276,7 +277,8 @@ public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException { seekToTheKey(kv5, sampleKv, toSeek); } - private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) throws IOException { + private void seekToTheKey(KeyValue expected, List kvs, ExtendedCell toSeek) + throws IOException { // create all seekers List encodedSeekers = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { @@ -301,7 +303,7 @@ private void seekToTheKey(KeyValue expected, 
List kvs, Cell toSeek) th } private void checkSeekingConsistency(List encodedSeekers, - Cell keyValue, KeyValue expected) { + ExtendedCell keyValue, KeyValue expected) { for (DataBlockEncoder.EncodedSeeker seeker : encodedSeekers) { seeker.seekToKeyInBlock(keyValue, false); Cell keyValue2 = seeker.getCell(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 7624e2197914..ac9d1fd1fa8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -49,11 +49,11 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; @@ -494,16 +494,17 @@ public void testCorruptOutOfOrderHFileWrite() throws IOException { .withCompression(Compression.Algorithm.NONE).withCompressTags(false).build(); HFileWriterImpl writer = new HFileWriterImpl(conf, cacheConf, path, mockedOutputStream, fileContext); - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); byte[] row = Bytes.toBytes("foo"); byte[] qualifier = Bytes.toBytes("qualifier"); byte[] cf = Bytes.toBytes(columnFamily); byte[] val = Bytes.toBytes("fooVal"); long firstTS = 100L; long secondTS = 101L; - Cell firstCell = cellBuilder.setRow(row).setValue(val).setTimestamp(firstTS) + ExtendedCell firstCell = cellBuilder.setRow(row).setValue(val).setTimestamp(firstTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); - Cell secondCell = cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) + ExtendedCell secondCell = cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); // second Cell will sort "higher" than the first because later timestamps should come first writer.append(firstCell); @@ -784,22 +785,22 @@ public void testCompressionOrdinance() { @Test public void testShortMidpointSameQual() { - Cell left = + ExtendedCell left = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(11) .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); - Cell right = + ExtendedCell right = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(9) .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); - Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); + ExtendedCell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); assertTrue( PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); 
assertTrue( PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); } - private Cell getCell(byte[] row, byte[] family, byte[] qualifier) { + private ExtendedCell getCell(byte[] row, byte[] family, byte[] qualifier) { return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) .setFamily(family).setQualifier(qualifier).setTimestamp(HConstants.LATEST_TIMESTAMP) .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); @@ -807,9 +808,9 @@ private Cell getCell(byte[] row, byte[] family, byte[] qualifier) { @Test public void testGetShortMidpoint() { - Cell left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); - Cell right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); - Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); + ExtendedCell left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); + ExtendedCell right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); + ExtendedCell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); assertTrue( PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); assertTrue( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java index f74833a3b5eb..e9cd8260e9b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java @@ -44,9 +44,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilder; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.fs.HFileSystem; @@ -465,7 +466,8 @@ protected void before() throws Throwable { HFile.getWriterFactory(testingUtility.getConfiguration(), CacheConfig.DISABLED) .withPath(hfs, path).withFileContext(context); - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); Random rand = new Random(Instant.now().toEpochMilli()); byte[] family = Bytes.toBytes("f"); try (HFile.Writer writer = factory.create()) { @@ -473,7 +475,7 @@ protected void before() throws Throwable { byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 100); byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand); byte[] value = RandomKeyValueUtil.randomValue(rand); - Cell cell = cellBuilder.setType(Cell.Type.Put).setRow(row).setFamily(family) + ExtendedCell cell = cellBuilder.setType(Cell.Type.Put).setRow(row).setFamily(family) .setQualifier(qualifier).setValue(value).build(); writer.append(cell); cellBuilder.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index 
7134b19ccec3..7ed4d6a19230 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -28,7 +28,7 @@ import java.util.Collection; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -152,7 +152,7 @@ public void testEncoding() throws IOException { public void testEncodingWithOffheapKeyValue() throws IOException { // usually we have just block without headers, but don't complicate that try { - List kvs = generator.generateTestExtendedOffheapKeyValues(60, true); + List kvs = generator.generateTestExtendedOffheapKeyValues(60, true); HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS) .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE) .withBlockSize(0).withChecksumType(ChecksumType.NULL).build(); @@ -214,7 +214,7 @@ private HFileBlock createBlockOnDisk(Configuration conf, List kvs, HFi block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext(), ByteBuffAllocator.HEAP); } - private void writeBlock(Configuration conf, List kvs, HFileContext fileContext, + private void writeBlock(Configuration conf, List kvs, HFileContext fileContext, boolean useTags) throws IOException { HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf, blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); @@ -223,7 +223,7 @@ private void writeBlock(Configuration conf, List kvs, HFileContext fileCon baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); DataOutputStream dos = new DataOutputStream(baos); blockEncoder.startBlockEncoding(context, dos); - for (Cell kv : kvs) { + for (ExtendedCell kv : kvs) { blockEncoder.encode(kv, context, dos); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java index 8ee46c17c4ad..1a0f280c597b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java @@ -34,7 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -105,8 +105,8 @@ private static byte[] randLongBytes() { private Path workDir; private FileSystem fs; private Path hfilePath; - private Cell firstCell = null; - private Cell secondCell = null; + private ExtendedCell firstCell = null; + private ExtendedCell secondCell = null; private ByteBuffAllocator allocator; @BeforeClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index 731e7ab79ac5..04c38127d51f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java 
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -104,7 +105,7 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE); conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); - Cell[] cells = new Cell[NUM_KV]; + ExtendedCell[] cells = new ExtendedCell[NUM_KV]; Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), String.format( "testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s", hfileVersion, bloomType, testI)); @@ -163,13 +164,15 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { } } - private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException { + private void checkSeekBefore(ExtendedCell[] cells, HFileScanner scanner, int i) + throws IOException { assertEquals( "Failed to seek to the key before #" + i + " (" + CellUtil.getCellKeyAsString(cells[i]) + ")", true, scanner.seekBefore(cells[i])); } - private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException { + private void checkNoSeekBefore(ExtendedCell[] cells, HFileScanner scanner, int i) + throws IOException { assertEquals("Incorrectly succeeded in seeking to before first key (" + CellUtil.getCellKeyAsString(cells[i]) + ")", false, scanner.seekBefore(cells[i])); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java index 943a7acb6481..4dd5ad1156a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -151,7 +152,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long shippedCallSizeLimit = (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); - Cell mobCell = null; + ExtendedCell mobCell = null; long counter = 0; long countFailAt = -1; @@ -186,7 +187,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel progress.cancel(); return false; } - for (Cell c : cells) { + for (Cell cell : cells) { + ExtendedCell c = (ExtendedCell) cell; counter++; if (compactMOBs) { if (MobUtils.isMobReferenceCell(c)) { @@ -304,7 +306,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel mobCells++; // append the original keyValue in the mob file. mobFileWriter.append(c); - Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); + ExtendedCell reference = + MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); // write the cell whose value is the path of a mob file to the store file. 
writer.append(reference); cellsCountCompactedToMob++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java index b47184390e86..5ce880485312 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.function.IntConsumer; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; public class DelegatingKeyValueScanner implements KeyValueScanner { @@ -36,22 +36,22 @@ public void shipped() throws IOException { } @Override - public Cell peek() { + public ExtendedCell peek() { return delegate.peek(); } @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { return delegate.next(); } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { return delegate.seek(key); } @Override - public boolean reseek(Cell key) throws IOException { + public boolean reseek(ExtendedCell key) throws IOException { return delegate.reseek(key); } @@ -71,7 +71,8 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) } @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) + throws IOException { return delegate.requestSeek(kv, forward, useBloom); } @@ -96,12 +97,12 @@ public Path getFilePath() { } @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { return delegate.backwardSeek(key); } @Override - public boolean seekToPreviousRow(Cell key) throws IOException { + public boolean seekToPreviousRow(ExtendedCell key) throws IOException { return delegate.seekToPreviousRow(key); } @@ -111,7 +112,7 @@ public boolean seekToLastRow() throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return delegate.getNextIndexedKey(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index c274d3c3129a..601370357744 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -54,8 +55,8 @@ public EncodedSeekPerformanceTest() { numberOfSeeks = DEFAULT_NUMBER_OF_SEEKS; } - private List prepareListOfTestSeeks(Path path) throws IOException { - List allKeyValues = new ArrayList<>(); + private List prepareListOfTestSeeks(Path path) throws IOException { + List allKeyValues = new ArrayList<>(); // read all of the key values HStoreFile storeFile = new 
HStoreFile(testingUtility.getTestFileSystem(), path, configuration, @@ -63,7 +64,7 @@ private List prepareListOfTestSeeks(Path path) throws IOException { storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false, false, 0, 0, false); - Cell current; + ExtendedCell current; scanner.seek(KeyValue.LOWESTKEY); while (null != (current = scanner.next())) { @@ -73,9 +74,9 @@ private List prepareListOfTestSeeks(Path path) throws IOException { storeFile.closeStoreFile(cacheConf.shouldEvictOnClose()); // pick seeks by random - List seeks = new ArrayList<>(); + List seeks = new ArrayList<>(); for (int i = 0; i < numberOfSeeks; ++i) { - Cell keyValue = allKeyValues.get(randomizer.nextInt(allKeyValues.size())); + ExtendedCell keyValue = allKeyValues.get(randomizer.nextInt(allKeyValues.size())); seeks.add(keyValue); } @@ -84,7 +85,7 @@ private List prepareListOfTestSeeks(Path path) throws IOException { return seeks; } - private void runTest(Path path, DataBlockEncoding blockEncoding, List seeks) + private void runTest(Path path, DataBlockEncoding blockEncoding, List seeks) throws IOException { // read all of the key values HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration, @@ -108,7 +109,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, List seek // do seeks long startSeeksTime = System.nanoTime(); - for (Cell keyValue : seeks) { + for (ExtendedCell keyValue : seeks) { scanner.seek(keyValue); Cell toVerify = scanner.next(); if (!keyValue.equals(toVerify)) { @@ -145,7 +146,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, List seek * @throws IOException if there is a bug while reading from disk */ public void runTests(Path path, DataBlockEncoding[] encodings) throws IOException { - List seeks = prepareListOfTestSeeks(path); + List seeks = prepareListOfTestSeeks(path); for (DataBlockEncoding blockEncoding : encodings) { runTest(path, blockEncoding, seeks); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java index 9a3c5d2e218b..f404cb2128e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java @@ -19,8 +19,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.CollectionBackedScanner; @@ -30,7 +30,7 @@ * file scanner. */ public class KeyValueScanFixture extends CollectionBackedScanner { - public KeyValueScanFixture(CellComparator comparator, Cell... cells) { + public KeyValueScanFixture(CellComparator comparator, ExtendedCell... 
cells) { super(comparator, cells); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java index bd26c5474e08..de9606d26ca6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java @@ -25,8 +25,9 @@ import java.util.TreeMap; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -192,10 +193,10 @@ public void close(boolean evictOnClose) throws IOException { } @Override - public Optional getLastKey() { + public Optional getLastKey() { if (splitPoint != null) { return Optional - .of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put) + .of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put) .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build()); } else { return Optional.empty(); @@ -203,9 +204,9 @@ public Optional getLastKey() { } @Override - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { if (splitPoint != null) { - return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY) + return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setType(Cell.Type.Put).setRow(splitPoint).build()); } else { return Optional.empty(); @@ -213,9 +214,9 @@ public Optional midKey() throws IOException { } @Override - public Optional getFirstKey() { + public Optional getFirstKey() { if (splitPoint != null) { - return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY) + return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1).build()); } else { return Optional.empty(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java index efad9d38e0c4..e2f9ac2f34ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -64,16 +65,18 @@ public static Object[] data() { private static final int NUM_OF_CELLS = 4; private static final int SMALL_CHUNK_SIZE = 64; - private Cell ascCells[]; - private CellArrayMap ascCbOnHeap; - private Cell descCells[]; - private CellArrayMap descCbOnHeap; + private ExtendedCell[] ascCells; + private CellArrayMap ascCbOnHeap; + private ExtendedCell[] descCells; + private CellArrayMap descCbOnHeap; private final static Configuration CONF = new Configuration(); private KeyValue lowerOuterCell; private KeyValue upperOuterCell; - private 
CellChunkMap ascCCM; // for testing ascending CellChunkMap with one chunk in array - private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk in array + private CellChunkMap ascCCM; // for testing ascending CellChunkMap with one chunk in + // array + private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk + // in array private final boolean smallChunks; private static ChunkCreator chunkCreator; @@ -116,10 +119,12 @@ public void setUp() throws Exception { final KeyValue kv4 = new KeyValue(four, f, q, 40, v); lowerOuterCell = new KeyValue(Bytes.toBytes(10), f, q, 10, v); upperOuterCell = new KeyValue(Bytes.toBytes(50), f, q, 10, v); - ascCells = new Cell[] { kv1, kv2, kv3, kv4 }; - ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells, 0, NUM_OF_CELLS, false); - descCells = new Cell[] { kv4, kv3, kv2, kv1 }; - descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells, 0, NUM_OF_CELLS, true); + ascCells = new ExtendedCell[] { kv1, kv2, kv3, kv4 }; + ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells, 0, + NUM_OF_CELLS, false); + descCells = new ExtendedCell[] { kv4, kv3, kv2, kv1 }; + descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells, 0, + NUM_OF_CELLS, true); CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f); @@ -138,7 +143,7 @@ public void setUp() throws Exception { /* Create and test ascending CellSet based on CellArrayMap */ @Test public void testCellArrayMapAsc() throws Exception { - CellSet cs = new CellSet(ascCbOnHeap); + CellSet cs = new CellSet<>(ascCbOnHeap); testCellBlocks(cs); testIterators(cs); } @@ -146,11 +151,11 @@ public void testCellArrayMapAsc() throws Exception { /* Create and test ascending and descending CellSet based on CellChunkMap */ @Test public void testCellChunkMap() throws Exception { - CellSet cs = new CellSet(ascCCM); + CellSet cs = new CellSet<>(ascCCM); testCellBlocks(cs); testIterators(cs); testSubSet(cs); - cs = new CellSet(descCCM); + cs = new CellSet<>(descCCM); testSubSet(cs); // cs = new CellSet(ascMultCCM); // testCellBlocks(cs); @@ -161,26 +166,26 @@ public void testCellChunkMap() throws Exception { @Test public void testAsc() throws Exception { - CellSet ascCs = new CellSet(ascCbOnHeap); + CellSet ascCs = new CellSet<>(ascCbOnHeap); assertEquals(NUM_OF_CELLS, ascCs.size()); testSubSet(ascCs); } @Test public void testDesc() throws Exception { - CellSet descCs = new CellSet(descCbOnHeap); + CellSet descCs = new CellSet<>(descCbOnHeap); assertEquals(NUM_OF_CELLS, descCs.size()); testSubSet(descCs); } - private void testSubSet(CellSet cs) throws Exception { + private void testSubSet(CellSet cs) throws Exception { for (int i = 0; i != ascCells.length; ++i) { - NavigableSet excludeTail = cs.tailSet(ascCells[i], false); - NavigableSet includeTail = cs.tailSet(ascCells[i], true); + NavigableSet excludeTail = cs.tailSet(ascCells[i], false); + NavigableSet includeTail = cs.tailSet(ascCells[i], true); assertEquals(ascCells.length - 1 - i, excludeTail.size()); assertEquals(ascCells.length - i, includeTail.size()); - Iterator excludeIter = excludeTail.iterator(); - Iterator includeIter = includeTail.iterator(); + Iterator excludeIter = excludeTail.iterator(); + Iterator includeIter = includeTail.iterator(); for (int j = 1 + i; j != ascCells.length; ++j) { assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j])); } @@ -191,12 +196,12 @@ private void testSubSet(CellSet cs) 
throws Exception { assertEquals(NUM_OF_CELLS, cs.tailSet(lowerOuterCell, false).size()); assertEquals(0, cs.tailSet(upperOuterCell, false).size()); for (int i = 0; i != ascCells.length; ++i) { - NavigableSet excludeHead = cs.headSet(ascCells[i], false); - NavigableSet includeHead = cs.headSet(ascCells[i], true); + NavigableSet excludeHead = cs.headSet(ascCells[i], false); + NavigableSet includeHead = cs.headSet(ascCells[i], true); assertEquals(i, excludeHead.size()); assertEquals(i + 1, includeHead.size()); - Iterator excludeIter = excludeHead.iterator(); - Iterator includeIter = includeHead.iterator(); + Iterator excludeIter = excludeHead.iterator(); + Iterator includeIter = includeHead.iterator(); for (int j = 0; j != i; ++j) { assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j])); } @@ -207,17 +212,17 @@ private void testSubSet(CellSet cs) throws Exception { assertEquals(0, cs.headSet(lowerOuterCell, false).size()); assertEquals(NUM_OF_CELLS, cs.headSet(upperOuterCell, false).size()); - NavigableMap sub = + NavigableMap sub = cs.getDelegatee().subMap(lowerOuterCell, true, upperOuterCell, true); assertEquals(NUM_OF_CELLS, sub.size()); - Iterator iter = sub.values().iterator(); + Iterator iter = sub.values().iterator(); for (int i = 0; i != ascCells.length; ++i) { assertEquals(true, CellUtil.equals(iter.next(), ascCells[i])); } } /* Generic basic test for immutable CellSet */ - private void testCellBlocks(CellSet cs) throws Exception { + private void testCellBlocks(CellSet cs) throws Exception { final byte[] oneAndHalf = Bytes.toBytes(20); final byte[] f = Bytes.toBytes("f"); final byte[] q = Bytes.toBytes("q"); @@ -235,12 +240,13 @@ private void testCellBlocks(CellSet cs) throws Exception { Cell last = cs.last(); assertTrue(ascCells[NUM_OF_CELLS - 1].equals(last)); - SortedSet tail = cs.tailSet(ascCells[1]); // check tail abd head sizes + SortedSet tail = cs.tailSet(ascCells[1]); // check tail abd head sizes assertEquals(NUM_OF_CELLS - 1, tail.size()); - SortedSet head = cs.headSet(ascCells[1]); + SortedSet head = cs.headSet(ascCells[1]); assertEquals(1, head.size()); - SortedSet tailOuter = cs.tailSet(outerCell); // check tail starting from outer cell + SortedSet tailOuter = cs.tailSet(outerCell); // check tail starting from outer + // cell assertEquals(NUM_OF_CELLS - 1, tailOuter.size()); Cell tailFirst = tail.first(); @@ -255,8 +261,7 @@ private void testCellBlocks(CellSet cs) throws Exception { } /* Generic iterators test for immutable CellSet */ - private void testIterators(CellSet cs) throws Exception { - + private void testIterators(CellSet cs) throws Exception { // Assert that we have NUM_OF_CELLS values and that they are in order int count = 0; for (Cell kv : cs) { @@ -273,7 +278,7 @@ private void testIterators(CellSet cs) throws Exception { // Test descending iterator count = 0; - for (Iterator i = cs.descendingIterator(); i.hasNext();) { + for (Iterator i = cs.descendingIterator(); i.hasNext();) { Cell kv = i.next(); assertEquals(ascCells[NUM_OF_CELLS - (count + 1)], kv); count++; @@ -282,8 +287,7 @@ private void testIterators(CellSet cs) throws Exception { } /* Create CellChunkMap with four cells inside the index chunk */ - private CellChunkMap setUpCellChunkMap(boolean asc) { - + private CellChunkMap setUpCellChunkMap(boolean asc) { // allocate new chunks and use the data chunk to hold the full data of the cells // and the index chunk to hold the cell-representations Chunk dataChunk = chunkCreator.getChunk(); @@ -298,9 +302,9 @@ private CellChunkMap 
setUpCellChunkMap(boolean asc) { int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID - Cell[] cellArray = asc ? ascCells : descCells; + ExtendedCell[] cellArray = asc ? ascCells : descCells; - for (Cell kv : cellArray) { + for (ExtendedCell kv : cellArray) { // do we have enough space to write the cell data on the data chunk? if (dataOffset + kv.getSerializedSize() > chunkCreator.getChunkSize()) { // allocate more data chunks if needed @@ -326,14 +330,14 @@ private CellChunkMap setUpCellChunkMap(boolean asc) { idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId } - return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); + return new CellChunkMap<>(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); } /* * Create CellChunkMap with four cells inside the data jumbo chunk. This test is working only with * small chunks sized SMALL_CHUNK_SIZE (64) bytes */ - private CellChunkMap setUpJumboCellChunkMap(boolean asc) { + private CellChunkMap setUpJumboCellChunkMap(boolean asc) { int smallChunkSize = SMALL_CHUNK_SIZE + 8; // allocate new chunks and use the data JUMBO chunk to hold the full data of the cells // and the normal index chunk to hold the cell-representations @@ -350,9 +354,9 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) { int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID - Cell[] cellArray = asc ? ascCells : descCells; + ExtendedCell[] cellArray = asc ? ascCells : descCells; - for (Cell kv : cellArray) { + for (ExtendedCell kv : cellArray) { int dataStartOfset = dataOffset; dataOffset = KeyValueUtil.appendTo(kv, dataBuffer, dataOffset, false); // write deep cell data @@ -378,6 +382,6 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) { dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; } - return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); + return new CellChunkMap<>(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java index b8ee022c9c21..d40516a501fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -45,7 +46,7 @@ public class TestCellSkipListSet { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCellSkipListSet.class); - private final CellSet csls = new CellSet(CellComparatorImpl.COMPARATOR); + private final CellSet csls = new CellSet<>(CellComparatorImpl.COMPARATOR); @Rule public TestName name = new TestName(); @@ -125,7 +126,7 @@ public void testDescendingIterator() throws Exception { } // Assert that we added 'total' values and that they are in order int count = 0; - for (Iterator i = 
this.csls.descendingIterator(); i.hasNext();) { + for (Iterator i = this.csls.descendingIterator(); i.hasNext();) { Cell kv = i.next(); assertEquals("" + (total - (count + 1)), Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); @@ -141,7 +142,7 @@ public void testDescendingIterator() throws Exception { // Assert that we added 'total' values and that they are in order and that // we are getting back value2 count = 0; - for (Iterator i = this.csls.descendingIterator(); i.hasNext();) { + for (Iterator i = this.csls.descendingIterator(); i.hasNext();) { Cell kv = i.next(); assertEquals("" + (total - (count + 1)), Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); @@ -164,9 +165,9 @@ public void testHeadTail() throws Exception { if (i == 1) splitter = kv; this.csls.add(kv); } - SortedSet tail = this.csls.tailSet(splitter); + SortedSet tail = this.csls.tailSet(splitter); assertEquals(2, tail.size()); - SortedSet head = this.csls.headSet(splitter); + SortedSet head = this.csls.headSet(splitter); assertEquals(1, head.size()); // Now ensure that we get back right answer even when we do tail or head. // Now overwrite with a new value. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 0f8fcea47a70..5e058b877027 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -311,7 +312,7 @@ public void testGet_memstoreAndSnapShot() throws IOException { public void testUpsertMemstoreSize() throws Exception { MemStoreSize oldSize = memstore.size(); - List l = new ArrayList<>(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); @@ -368,7 +369,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception { t = runSnapshot(memstore, true); // test the case that the timeOfOldestEdit is updated after a KV upsert - List l = new ArrayList<>(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); kv1.setSequenceId(100); l.add(kv1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index adccec5d8e50..35e18d546f23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ 
-824,7 +825,7 @@ public void testUpsertMemstoreSize() throws Exception { memstore = new DefaultMemStore(conf, CellComparatorImpl.COMPARATOR); MemStoreSize oldSize = memstore.size(); - List l = new ArrayList<>(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); @@ -886,7 +887,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception { t = runSnapshot(memstore); // test the case that the timeOfOldestEdit is updated after a KV upsert - List l = new ArrayList<>(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); kv1.setSequenceId(100); l.add(kv1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 6735e4722297..53e183f82590 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -110,11 +111,11 @@ public class TestHMobStore { private byte[] value2 = Bytes.toBytes("value2"); private Path mobFilePath; private Date currentDate = new Date(); - private Cell seekKey1; - private Cell seekKey2; - private Cell seekKey3; + private ExtendedCell seekKey1; + private ExtendedCell seekKey2; + private ExtendedCell seekKey3; private NavigableSet qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); - private List expected = new ArrayList<>(); + private List expected = new ArrayList<>(); private long id = EnvironmentEdgeManager.currentTime(); private Get get = new Get(row); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index ccc755a03580..a3fa1bb65db8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -76,12 +76,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -881,9 +881,9 @@ private static StoreFlushContext flushStore(HStore store, long id) throws IOExce * Generate a list of KeyValues for testing based on given parameters * @return the rows key-value list */ - private List getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, + private List getKeyValueSet(long[] 
timestamps, int numRows, byte[] qualifier, byte[] family) { - List kvList = new ArrayList<>(); + List kvList = new ArrayList<>(); for (int i = 1; i <= numRows; i++) { byte[] b = Bytes.toBytes(i); for (long timestamp : timestamps) { @@ -904,15 +904,15 @@ public void testMultipleTimestamps() throws IOException { init(this.name.getMethodName()); - List kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family); - for (Cell kv : kvList1) { + List kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family); + for (ExtendedCell kv : kvList1) { this.store.add(kv, null); } flushStore(store, id++); - List kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family); - for (Cell kv : kvList2) { + List kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family); + for (ExtendedCell kv : kvList2) { this.store.add(kv, null); } @@ -1199,34 +1199,37 @@ private long countMemStoreScanner(StoreScanner scanner) { public void testNumberOfMemStoreScannersAfterFlush() throws IOException { long seqId = 100; long timestamp = EnvironmentEdgeManager.currentTime(); - Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); + ExtendedCell cell0 = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) + .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell0, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.emptyList()); - Cell cell1 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); + ExtendedCell cell1 = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) + .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell1, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1)); seqId = 101; timestamp = EnvironmentEdgeManager.currentTime(); - Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family) - .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); + ExtendedCell cell2 = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family) + .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell2, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1, cell2)); } - private void testNumberOfMemStoreScannersAfterFlush(List inputCellsBeforeSnapshot, - List inputCellsAfterSnapshot) throws IOException { + private void testNumberOfMemStoreScannersAfterFlush(List inputCellsBeforeSnapshot, + List inputCellsAfterSnapshot) throws IOException { init(this.name.getMethodName() + "-" + inputCellsBeforeSnapshot.size()); TreeSet quals = new TreeSet<>(Bytes.BYTES_COMPARATOR); long seqId = Long.MIN_VALUE; - for (Cell c : inputCellsBeforeSnapshot) { + for (ExtendedCell c : inputCellsBeforeSnapshot) { quals.add(CellUtil.cloneQualifier(c)); seqId = Math.max(seqId, c.getSequenceId()); } - for (Cell c : inputCellsAfterSnapshot) { + for (ExtendedCell c : inputCellsAfterSnapshot) { quals.add(CellUtil.cloneQualifier(c)); seqId = Math.max(seqId, c.getSequenceId()); } @@ -1259,17 +1262,16 @@ private void 
testNumberOfMemStoreScannersAfterFlush(List inputCellsBeforeS } } - private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value) + private ExtendedCell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value) throws IOException { return createCell(row, qualifier, ts, sequenceId, value); } - private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value) - throws IOException { - Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put).setValue(value).build(); - PrivateCellUtil.setSequenceId(c, sequenceId); - return c; + private ExtendedCell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, + byte[] value) throws IOException { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) + .setFamily(family).setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put) + .setValue(value).setSequenceId(sequenceId).build(); } @Test @@ -1974,8 +1976,8 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize() byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue); int smallCellByteSize = MutableSegment.getCellLength(smallCell); int largeCellByteSize = MutableSegment.getCellLength(largeCell); int flushByteSize = smallCellByteSize + largeCellByteSize - 2; @@ -2021,7 +2023,7 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize() for (int i = 0; i < 100; i++) { long currentTimestamp = timestamp + 100 + i; - Cell cell = createCell(qf2, currentTimestamp, seqId, largeValue); + ExtendedCell cell = createCell(qf2, currentTimestamp, seqId, largeValue); store.add(cell, new NonThreadSafeMemStoreSizing()); } } finally { @@ -2050,7 +2052,7 @@ public void testCompactingMemStoreCellExceedInmemoryFlushSize() throws Exception MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); long timestamp = EnvironmentEdgeManager.currentTime(); long seqId = 100; - Cell cell = createCell(qf1, timestamp, seqId, value); + ExtendedCell cell = createCell(qf1, timestamp, seqId, value); int cellByteSize = MutableSegment.getCellLength(cell); store.add(cell, memStoreSizing); assertTrue(memStoreSizing.getCellsCount() == 1); @@ -2075,9 +2077,9 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; final byte[] rowKey1 = Bytes.toBytes("rowKey1"); - final Cell originalCell1 = createCell(rowKey1, qf1, timestamp, seqId, cellValue); + final ExtendedCell originalCell1 = createCell(rowKey1, qf1, timestamp, seqId, cellValue); final byte[] rowKey2 = Bytes.toBytes("rowKey2"); - final Cell originalCell2 = createCell(rowKey2, qf1, timestamp, seqId, cellValue); + final ExtendedCell originalCell2 = createCell(rowKey2, qf1, timestamp, seqId, cellValue); TreeSet quals = new TreeSet<>(Bytes.BYTES_COMPARATOR); quals.add(qf1); @@ -2110,9 +2112,9 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti StoreScanner storeScanner = (StoreScanner) store.getScanner(new Scan(new Get(rowKey1)), quals, 
seqId + 1); SegmentScanner segmentScanner = getTypeKeyValueScanner(storeScanner, SegmentScanner.class); - Cell resultCell1 = segmentScanner.next(); + ExtendedCell resultCell1 = segmentScanner.next(); assertTrue(CellUtil.equals(resultCell1, originalCell1)); - int cell1ChunkId = ((ExtendedCell) resultCell1).getChunkId(); + int cell1ChunkId = resultCell1.getChunkId(); assertTrue(cell1ChunkId != ExtendedCell.CELL_NOT_BASED_ON_CHUNK); assertNull(segmentScanner.next()); segmentScanner.close(); @@ -2216,7 +2218,7 @@ private void doWriteTestLargeCellAndSmallCellConcurrently(IntBinaryOperator getF try { for (int i = 1; i <= MyCompactingMemStore3.CELL_COUNT; i++) { long currentTimestamp = timestamp + i; - Cell cell = createCell(qf1, currentTimestamp, seqId, smallValue); + ExtendedCell cell = createCell(qf1, currentTimestamp, seqId, smallValue); totalCellByteSize.addAndGet(MutableSegment.getCellLength(cell)); store.add(cell, memStoreSizing); } @@ -2247,7 +2249,7 @@ private void doWriteTestLargeCellAndSmallCellConcurrently(IntBinaryOperator getF Thread.currentThread().setName(MyCompactingMemStore3.LARGE_CELL_THREAD_NAME); for (int i = 1; i <= MyCompactingMemStore3.CELL_COUNT; i++) { long currentTimestamp = timestamp + i; - Cell cell = createCell(qf2, currentTimestamp, seqId, largeValue); + ExtendedCell cell = createCell(qf2, currentTimestamp, seqId, largeValue); totalCellByteSize.addAndGet(MutableSegment.getCellLength(cell)); store.add(cell, memStoreSizing); } @@ -2314,8 +2316,8 @@ public void testFlattenAndSnapshotCompactingMemStoreConcurrently() throws Except byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue); int smallCellByteSize = MutableSegment.getCellLength(smallCell); int largeCellByteSize = MutableSegment.getCellLength(largeCell); int totalCellByteSize = (smallCellByteSize + largeCellByteSize); @@ -2417,8 +2419,8 @@ public void testFlattenSnapshotWriteCompactingMemeStoreConcurrently() throws Exc byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue); int smallCellByteSize = MutableSegment.getCellLength(smallCell); int largeCellByteSize = MutableSegment.getCellLength(largeCell); int firstWriteCellByteSize = (smallCellByteSize + largeCellByteSize); @@ -2439,8 +2441,8 @@ public void testFlattenSnapshotWriteCompactingMemeStoreConcurrently() throws Exc store.add(largeCell, new NonThreadSafeMemStoreSizing()); final AtomicReference exceptionRef = new AtomicReference(); - final Cell writeAgainCell1 = createCell(qf3, timestamp, seqId + 1, largeValue); - final Cell writeAgainCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); + final ExtendedCell writeAgainCell1 = createCell(qf3, timestamp, seqId + 1, largeValue); + final ExtendedCell writeAgainCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); final int writeAgainCellByteSize = 
MutableSegment.getCellLength(writeAgainCell1) + MutableSegment.getCellLength(writeAgainCell2); final Thread writeAgainThread = new Thread(() -> { @@ -2519,8 +2521,8 @@ public void testClearSnapshotGetScannerConcurrently() throws Exception { byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue); TreeSet quals = new TreeSet<>(Bytes.BYTES_COMPARATOR); quals.add(qf1); quals.add(qf2); @@ -2669,15 +2671,14 @@ public CustomDefaultMemStore(Configuration conf, CellComparator c, */ @Test public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception { - Configuration conf = HBaseConfiguration.create(); byte[] smallValue = new byte[3]; byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue); TreeSet quals = new TreeSet<>(Bytes.BYTES_COMPARATOR); quals.add(qf1); quals.add(qf2); @@ -2784,12 +2785,12 @@ public void testImmutableMemStoreLABRefCnt() throws Exception { byte[] largeValue = new byte[9]; final long timestamp = EnvironmentEdgeManager.currentTime(); final long seqId = 100; - final Cell smallCell1 = createCell(qf1, timestamp, seqId, smallValue); - final Cell largeCell1 = createCell(qf2, timestamp, seqId, largeValue); - final Cell smallCell2 = createCell(qf3, timestamp, seqId + 1, smallValue); - final Cell largeCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); - final Cell smallCell3 = createCell(qf5, timestamp, seqId + 2, smallValue); - final Cell largeCell3 = createCell(qf6, timestamp, seqId + 2, largeValue); + final ExtendedCell smallCell1 = createCell(qf1, timestamp, seqId, smallValue); + final ExtendedCell largeCell1 = createCell(qf2, timestamp, seqId, largeValue); + final ExtendedCell smallCell2 = createCell(qf3, timestamp, seqId + 1, smallValue); + final ExtendedCell largeCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); + final ExtendedCell smallCell3 = createCell(qf5, timestamp, seqId + 2, smallValue); + final ExtendedCell largeCell3 = createCell(qf6, timestamp, seqId + 2, largeValue); int smallCellByteSize = MutableSegment.getCellLength(smallCell1); int largeCellByteSize = MutableSegment.getCellLength(largeCell1); @@ -3165,7 +3166,8 @@ protected boolean checkAndAddToActiveSize(MutableSegment currActive, Cell cellTo } @Override - protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) { + protected void doAdd(MutableSegment currentActive, ExtendedCell cell, + MemStoreSizing memstoreSizing) { if (Thread.currentThread().getName().equals(SMALL_CELL_THREAD_NAME)) { try { /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java index d866acd42a4b..fea25b424e10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java @@ -27,6 +27,7 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -58,16 +59,16 @@ public class TestKeyValueHeap { private byte[] col5 = Bytes.toBytes("col5"); // Variable name encoding. kv - Cell kv111 = new KeyValue(row1, fam1, col1, data); - Cell kv112 = new KeyValue(row1, fam1, col2, data); - Cell kv113 = new KeyValue(row1, fam1, col3, data); - Cell kv114 = new KeyValue(row1, fam1, col4, data); - Cell kv115 = new KeyValue(row1, fam1, col5, data); - Cell kv121 = new KeyValue(row1, fam2, col1, data); - Cell kv122 = new KeyValue(row1, fam2, col2, data); - Cell kv211 = new KeyValue(row2, fam1, col1, data); - Cell kv212 = new KeyValue(row2, fam1, col2, data); - Cell kv213 = new KeyValue(row2, fam1, col3, data); + ExtendedCell kv111 = new KeyValue(row1, fam1, col1, data); + ExtendedCell kv112 = new KeyValue(row1, fam1, col2, data); + ExtendedCell kv113 = new KeyValue(row1, fam1, col3, data); + ExtendedCell kv114 = new KeyValue(row1, fam1, col4, data); + ExtendedCell kv115 = new KeyValue(row1, fam1, col5, data); + ExtendedCell kv121 = new KeyValue(row1, fam2, col1, data); + ExtendedCell kv122 = new KeyValue(row1, fam2, col2, data); + ExtendedCell kv211 = new KeyValue(row2, fam1, col1, data); + ExtendedCell kv212 = new KeyValue(row2, fam1, col2, data); + ExtendedCell kv213 = new KeyValue(row2, fam1, col3, data); TestScanner s1 = new TestScanner(Arrays.asList(kv115, kv211, kv212)); TestScanner s2 = new TestScanner(Arrays.asList(kv111, kv112)); @@ -121,7 +122,7 @@ public void testSeek() throws IOException { // Creating KeyValueHeap try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) { - Cell seekKv = new KeyValue(row2, fam1, null, null); + ExtendedCell seekKv = new KeyValue(row2, fam1, null, null); kvh.seek(seekKv); List actual = Arrays.asList(kvh.peek()); @@ -195,8 +196,8 @@ public void testScannerException() throws IOException { @Test public void testPriorityId() throws IOException { - Cell kv113A = new KeyValue(row1, fam1, col3, Bytes.toBytes("aaa")); - Cell kv113B = new KeyValue(row1, fam1, col3, Bytes.toBytes("bbb")); + ExtendedCell kv113A = new KeyValue(row1, fam1, col3, Bytes.toBytes("aaa")); + ExtendedCell kv113B = new KeyValue(row1, fam1, col3, Bytes.toBytes("bbb")); TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 1); TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 2); List expected = Arrays.asList(kv111, kv112, kv113B, kv113A); @@ -212,11 +213,11 @@ private static class TestScanner extends CollectionBackedScanner { private boolean closed = false; private long scannerOrder = 0; - public TestScanner(List list) { + public TestScanner(List list) { super(list); } - public TestScanner(List list, long scannerOrder) { + public TestScanner(List list, long scannerOrder) { this(list); this.scannerOrder = scannerOrder; } @@ -240,7 +241,7 @@ private static class SeekTestScanner extends TestScanner { private int closedNum = 0; private boolean realSeekDone = true; - public SeekTestScanner(List list) { + public SeekTestScanner(List list) { super(list); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java index e91085edd2f0..cd91d39c77b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; @@ -333,7 +334,7 @@ public void testForceCopyOfBigCellInto() { } private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName, - Cell cellToCopyInto) { + ExtendedCell cellToCopyInto) { Thread thread = new Thread() { volatile boolean stopped = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java index f8d09a2c9c15..9b32558edf12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java @@ -28,7 +28,7 @@ import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; @@ -153,7 +153,7 @@ public void testLABChunkQueueWithMultipleMSLABs() throws Exception { } private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName, - Cell cellToCopyInto) { + ExtendedCell cellToCopyInto) { Thread thread = new Thread() { volatile boolean stopped = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index e7c11517268e..26d5cadd97b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -255,7 +256,7 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { Put put = new Put(key); put.addColumn(FAMILY, COLUMN, value); for (Cell c : put.get(FAMILY, COLUMN)) { - writer.append(c); + writer.append((ExtendedCell) c); } writer.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 91717060d998..1a6801666145 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -39,6 +39,7 @@ import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -113,7 +114,7 @@ public class TestStoreScanner { * to test scan does the right thing as it we do Gets, StoreScanner#optimize, and what we do on * (faked) block boundaries. */ - private static final Cell[] CELL_GRID = new Cell[] { + private static final ExtendedCell[] CELL_GRID = new ExtendedCell[] { ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) .build(), @@ -193,7 +194,7 @@ public KeyValueHeapWithCount(List scanners, } @Override - public Cell peek() { + public ExtendedCell peek() { this.count.incrementAndGet(); return super.peek(); } @@ -230,7 +231,7 @@ protected KeyValueHeap newKVHeap(List scanners, } @Override - protected boolean trySkipToNextRow(Cell cell) throws IOException { + protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException { boolean optimized = super.trySkipToNextRow(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) + ", optimized=" + optimized); @@ -241,7 +242,7 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { } @Override - protected boolean trySkipToNextColumn(Cell cell) throws IOException { + protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) + ", optimized=" + optimized); @@ -252,7 +253,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { // Fake block boundaries by having index of next block change as we go through scan. return count.get() > CELL_GRID_BLOCK4_BOUNDARY ? PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK5_BOUNDARY]) @@ -266,7 +267,7 @@ public Cell getNextIndexedKey() { private static final int CELL_WITH_VERSIONS_BLOCK2_BOUNDARY = 4; - private static final Cell[] CELL_WITH_VERSIONS = new Cell[] { + private static final ExtendedCell[] CELL_WITH_VERSIONS = new ExtendedCell[] { ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) .setQualifier(ONE).setTimestamp(2L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) .build(), @@ -298,7 +299,7 @@ Arrays. asList(new KeyValueScanner[] { } @Override - protected boolean trySkipToNextColumn(Cell cell) throws IOException { + protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) + ", optimized=" + optimized); @@ -309,7 +310,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { // Fake block boundaries by having index of next block change as we go through scan. return PrivateCellUtil .createFirstOnRow(CELL_WITH_VERSIONS[CELL_WITH_VERSIONS_BLOCK2_BOUNDARY]); @@ -327,7 +328,7 @@ Arrays. 
asList(new KeyValueScanner[] { } @Override - protected boolean trySkipToNextColumn(Cell cell) throws IOException { + protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) + ", optimized=" + optimized); @@ -338,7 +339,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java index 8cabe1fb3632..efbb4c77d475 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; @@ -64,8 +65,8 @@ public void testNeverIncludeFakeCell() throws IOException { new ScanInfo(this.conf, fam2, 10, 1, ttl, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false), get.getFamilyMap().get(fam2), now - ttl, now, null); - Cell kv = new KeyValue(row1, fam2, col2, 1, data); - Cell cell = PrivateCellUtil.createLastOnRowCol(kv); + ExtendedCell kv = new KeyValue(row1, fam2, col2, 1, data); + ExtendedCell cell = PrivateCellUtil.createLastOnRowCol(kv); qm.setToNewRow(kv); MatchCode code = qm.match(cell); assertFalse(code.compareTo(MatchCode.SEEK_NEXT_COL) != 0); @@ -391,7 +392,7 @@ scanWithFilter, new ScanInfo(this.conf, fam2, 0, 5, ttl, KeepDeletedCells.FALSE, // For last cell, the query matcher will return SEEK_NEXT_COL, and the // ColumnTracker will skip to the next column, which is col4. 
- Cell lastCell = memstore.get(memstore.size() - 1); + ExtendedCell lastCell = memstore.get(memstore.size() - 1); Cell nextCell = qm.getKeyForNextColumn(lastCell); assertArrayEquals(nextCell.getQualifierArray(), col4); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 833fd28f5615..947c14e716f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -50,6 +50,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -514,7 +515,7 @@ public Object run() throws Exception { final AtomicInteger countOfRestoredEdits = new AtomicInteger(0); HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) { @Override - protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreSizing) { + protected void restoreEdit(HStore s, ExtendedCell cell, MemStoreSizing memstoreSizing) { super.restoreEdit(s, cell, memstoreSizing); countOfRestoredEdits.incrementAndGet(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java index 5d75fca72c88..20f621fba61a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; @@ -112,7 +113,7 @@ public static void createHFile(Configuration configuration, FileSystem fs, Path try { // subtract 2 since iterateOnSplits doesn't include boundary keys for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) { - Cell kv = new KeyValue(key, family, qualifier, now, key); + ExtendedCell kv = new KeyValue(key, family, qualifier, now, key); if (withTag) { // add a tag. Arbitrarily chose mob tag since we have a helper already. Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key); From ff197587f08a78fefc058a83f86009523ddaa36a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 21 Jun 2024 12:15:46 +0800 Subject: [PATCH 426/514] HBASE-28617 Addendum fix doap category --- src/site/resources/doap_Hbase.rdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/site/resources/doap_Hbase.rdf b/src/site/resources/doap_Hbase.rdf index 09f41fd3655a..1c6911d25dc0 100644 --- a/src/site/resources/doap_Hbase.rdf +++ b/src/site/resources/doap_Hbase.rdf @@ -33,7 +33,7 @@ Java - + Apache HBase From f8aa3e23126c3ed0fd2558ecf1cec205d8edc928 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Fri, 21 Jun 2024 09:17:54 -0400 Subject: [PATCH 427/514] HBASE-28680 BackupLogCleaner causes HMaster WALs to pile up indefinitely (#6006) We have been trying to setup daily incremental backups for hundreds of clusters at my day job. 
Recently we discovered that old WALs were piling up across many clusters inline with when we began running incremental backups. This led to the realization that the BackupLogCleaner will always skip archived HMaster WALs. This is a problem because, if a cleaner is skipping a given file, then the CleanerChore will never delete it. This seems like a misunderstanding of what it means to "skip" a WAL in a BaseLogCleanerDelegate, and, instead, we should always return these HMaster WALs as deletable from the perspective of the BackupLogCleaner. We could subject them to the same scrutiny as RegionServer WALs: are they older than the most recent successful backup? But, if I understand correctly, HMaster WALs do not contain any data relevant to table backups, so that would be unnecessary. Co-authored-by: Ray Mattingly Signed-off-by: Nick Dimiduk --- .../hbase/backup/master/BackupLogCleaner.java | 58 ++++++++++++------- .../backup/master/TestBackupLogCleaner.java | 19 ++++++ 2 files changed, 57 insertions(+), 20 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index f3ddda499b0f..1b53aa1d67f9 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; +import org.apache.hadoop.hbase.master.region.MasterRegionFactory; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -123,27 +125,8 @@ public Iterable getDeletableFiles(Iterable files) { return Collections.emptyList(); } for (FileStatus file : files) { - String fn = file.getPath().getName(); - if (fn.startsWith(WALProcedureStore.LOG_PREFIX)) { + if (canDeleteFile(addressToLastBackupMap, file.getPath())) { filteredFiles.add(file); - continue; - } - - try { - Address walServerAddress = - Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath())); - long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName()); - - if ( - !addressToLastBackupMap.containsKey(walServerAddress) - || addressToLastBackupMap.get(walServerAddress) >= walTimestamp - ) { - filteredFiles.add(file); - } - } catch (Exception ex) { - LOG.warn( - "Error occurred while filtering file: {} with error: {}. Ignoring cleanup of this log", - file.getPath(), ex.getMessage()); } } @@ -176,4 +159,39 @@ public void stop(String why) { public boolean isStopped() { return this.stopped; } + + protected static boolean canDeleteFile(Map addressToLastBackupMap, Path path) { + if (isHMasterWAL(path)) { + return true; + } + + try { + String hostname = BackupUtils.parseHostNameFromLogFile(path); + if (hostname == null) { + LOG.warn( + "Cannot parse hostname from RegionServer WAL file: {}. 
Ignoring cleanup of this log", + path); + return false; + } + Address walServerAddress = Address.fromString(hostname); + long walTimestamp = AbstractFSWALProvider.getTimestamp(path.getName()); + + if ( + !addressToLastBackupMap.containsKey(walServerAddress) + || addressToLastBackupMap.get(walServerAddress) >= walTimestamp + ) { + return true; + } + } catch (Exception ex) { + LOG.warn("Error occurred while filtering file: {}. Ignoring cleanup of this log", path, ex); + return false; + } + return false; + } + + private static boolean isHMasterWAL(Path path) { + String fn = path.getName(); + return fn.startsWith(WALProcedureStore.LOG_PREFIX) + || fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX); + } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java index 2b0f9c0cba5f..e372c6ad1533 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java @@ -20,10 +20,12 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupType; @@ -132,4 +134,21 @@ public void testBackupLogCleaner() throws Exception { conn.close(); } } + + @Test + public void testCleansUpHMasterWal() { + Path path = new Path("/hbase/MasterData/WALs/hmaster,60000,1718808578163"); + assertTrue(BackupLogCleaner.canDeleteFile(Collections.emptyMap(), path)); + } + + @Test + public void testCleansUpArchivedHMasterWal() { + Path normalPath = + new Path("/hbase/oldWALs/hmaster%2C60000%2C1716224062663.1716247552189$masterlocalwal$"); + assertTrue(BackupLogCleaner.canDeleteFile(Collections.emptyMap(), normalPath)); + + Path masterPath = new Path( + "/hbase/MasterData/oldWALs/hmaster%2C60000%2C1716224062663.1716247552189$masterlocalwal$"); + assertTrue(BackupLogCleaner.canDeleteFile(Collections.emptyMap(), masterPath)); + } } From dd694e4d74254965da8097ef06885ad94a3791e5 Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sat, 22 Jun 2024 01:02:20 +0800 Subject: [PATCH 428/514] HBASE-28656 Optimize the verifyCopyResult logic in ExportSnapshot (#5996) Signed-off-by: Duo Zhang Signed-off-by: Wei-Chiu Chuang --- .../hadoop/hbase/snapshot/ExportSnapshot.java | 76 ++++++++++++++++--- .../hbase/snapshot/TestExportSnapshot.java | 26 ++++++- .../snapshot/TestExportSnapshotAdjunct.java | 4 +- .../TestExportSnapshotV1NoCluster.java | 2 +- 4 files changed, 92 insertions(+), 16 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index fd69960b78da..4e0c54b718bb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -168,6 +168,15 @@ public enum Counter { BYTES_COPIED } + /** + * Indicates the checksum comparison result. + */ + public enum ChecksumComparison { + TRUE, // checksum comparison is compatible and true. 
+ FALSE, // checksum comparison is compatible and false. + INCOMPATIBLE, // checksum comparison is not compatible. + } + private static class ExportMapper extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class); @@ -533,6 +542,9 @@ private FileChecksum getFileChecksum(final FileSystem fs, final Path path) { } } + /** + * Utility to compare the file length and checksums for the paths specified. + */ private void verifyCopyResult(final FileStatus inputStat, final FileStatus outputStat) throws IOException { long inputLen = inputStat.getLen(); @@ -547,20 +559,64 @@ private void verifyCopyResult(final FileStatus inputStat, final FileStatus outpu // If length==0, we will skip checksum if (inputLen != 0 && verifyChecksum) { - FileChecksum inChecksum = getFileChecksum(inputFs, inputPath); - if (inChecksum == null) { - LOG.warn("Input file " + inputPath + " checksums are not available"); - } - FileChecksum outChecksum = getFileChecksum(outputFs, outputPath); - if (outChecksum == null) { - LOG.warn("Output file " + outputPath + " checksums are not available"); - } - if (inChecksum != null && outChecksum != null && !inChecksum.equals(outChecksum)) { - throw new IOException("Checksum mismatch between " + inputPath + " and " + outputPath); + FileChecksum inChecksum = getFileChecksum(inputFs, inputStat.getPath()); + FileChecksum outChecksum = getFileChecksum(outputFs, outputStat.getPath()); + + ChecksumComparison checksumComparison = verifyChecksum(inChecksum, outChecksum); + if (!checksumComparison.equals(ChecksumComparison.TRUE)) { + StringBuilder errMessage = new StringBuilder("Checksum mismatch between ") + .append(inputPath).append(" and ").append(outputPath).append("."); + + boolean addSkipHint = false; + String inputScheme = inputFs.getScheme(); + String outputScheme = outputFs.getScheme(); + if (!inputScheme.equals(outputScheme)) { + errMessage.append(" Input and output filesystems are of different types.\n") + .append("Their checksum algorithms may be incompatible."); + addSkipHint = true; + } else if (inputStat.getBlockSize() != outputStat.getBlockSize()) { + errMessage.append(" Input and output differ in block-size."); + addSkipHint = true; + } else if ( + inChecksum != null && outChecksum != null + && !inChecksum.getAlgorithmName().equals(outChecksum.getAlgorithmName()) + ) { + errMessage.append(" Input and output checksum algorithms are of different types."); + addSkipHint = true; + } + if (addSkipHint) { + errMessage + .append(" You can choose file-level checksum validation via " + + "-Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes" + + " or filesystems are different.") + .append(" Or you can skip checksum-checks altogether with --no-checksum-verify.\n") + .append(" (NOTE: By skipping checksums, one runs the risk of " + + "masking data-corruption during file-transfer.)\n"); + } + throw new IOException(errMessage.toString()); } } } + /** + * Utility to compare checksums + */ + private ChecksumComparison verifyChecksum(final FileChecksum inChecksum, + final FileChecksum outChecksum) { + // If the input or output checksum is null, or the algorithms of input and output are not + // equal, that means there is no comparison + // and return not compatible. else if matched, return compatible with the matched result. 
+ if ( + inChecksum == null || outChecksum == null + || !inChecksum.getAlgorithmName().equals(outChecksum.getAlgorithmName()) + ) { + return ChecksumComparison.INCOMPATIBLE; + } else if (inChecksum.equals(outChecksum)) { + return ChecksumComparison.TRUE; + } + return ChecksumComparison.FALSE; + } + /** * Check if the two files are equal by looking at the file length, and at the checksum (if user * has specified the verifyChecksum flag). diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 4dcadc755da3..813da956799e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -227,6 +227,23 @@ public void testConsecutiveExports() throws Exception { removeExportDir(copyDir); } + @Test + public void testExportWithChecksum() throws Exception { + // Test different schemes: input scheme is hdfs:// and output scheme is file:// + // The checksum verification will fail + Path copyLocalDir = getLocalDestinationDir(TEST_UTIL); + testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyLocalDir, false, false, + getBypassRegionPredicate(), false, true); + + // Test same schemes: input scheme is hdfs:// and output scheme is hdfs:// + // The checksum verification will success + Path copyHdfsDir = getHdfsDestinationDir(); + testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyHdfsDir, false, false, + getBypassRegionPredicate(), true, true); + } + @Test public void testExportWithTargetName() throws Exception { final String targetName = "testExportWithTargetName"; @@ -281,7 +298,7 @@ protected void testExportFileSystemState(final TableName tableName, final String throws Exception { testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, resetTtl, - getBypassRegionPredicate(), true); + getBypassRegionPredicate(), true, false); } /** @@ -290,8 +307,8 @@ protected void testExportFileSystemState(final TableName tableName, final String protected static void testExportFileSystemState(final Configuration conf, final TableName tableName, final String snapshotName, final String targetName, final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite, - final boolean resetTtl, final RegionPredicate bypassregionPredicate, boolean success) - throws Exception { + final boolean resetTtl, final RegionPredicate bypassregionPredicate, final boolean success, + final boolean checksumVerify) throws Exception { FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); @@ -312,6 +329,9 @@ protected static void testExportFileSystemState(final Configuration conf, if (resetTtl) { opts.add("--reset-ttl"); } + if (!checksumVerify) { + opts.add("--no-checksum-verify"); + } // Export Snapshot int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()])); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java index a2db1c688207..9453b9fcaf46 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java @@ -151,7 +151,7 @@ public void testExportRetry() throws Exception { conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2); conf.setInt("mapreduce.map.maxattempts", 3); TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, true); + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, true, false); } /** @@ -167,6 +167,6 @@ public void testExportFailure() throws Exception { conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4); conf.setInt("mapreduce.map.maxattempts", 3); TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, false); + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, false, false); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index 19496fcfe414..0215711070f4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -125,7 +125,7 @@ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, TableName tableName = builder.getTableDescriptor().getTableName(); TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, snapshotName, snapshotName, snapshotFilesCount, testDir, - getDestinationDir(fs, testUtil, testDir), false, false, null, true); + getDestinationDir(fs, testUtil, testDir), false, false, null, true, false); } static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtil hctu, Path testDir) From 17ce7c3b4c5e58af1f70d17e62940c9dd6fb4660 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 22 Jun 2024 10:22:56 +0800 Subject: [PATCH 429/514] HBASE-28679 Upgrade yetus to a newer version (#6012) Signed-off-by: Nick Dimiduk --- dev-support/Jenkinsfile | 8 +-- dev-support/Jenkinsfile_GitHub | 55 +++++++++---------- dev-support/docker/Dockerfile | 2 + dev-support/hbase_nightly_yetus.sh | 10 ++-- dev-support/jenkins_precommit_github_yetus.sh | 13 +++-- 5 files changed, 45 insertions(+), 43 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 461df28c8e9d..227229bf8d84 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -31,7 +31,7 @@ pipeline { disableConcurrentBuilds() } environment { - YETUS_RELEASE = '0.12.0' + YETUS_RELEASE = '0.15.0' // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure. OUTPUT_DIR_RELATIVE_GENERAL = 'output-general' OUTPUT_DIR_RELATIVE_JDK8_HADOOP2 = 'output-jdk8-hadoop2' @@ -43,12 +43,12 @@ pipeline { PROJECT_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh' PERSONALITY_FILE = 'tools/personality.sh' // This section of the docs tells folks not to use the javadoc tag. 
older branches have our old version of the check for said tag. - AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh' - WHITESPACE_IGNORE_LIST = '.*/generated/.*' + AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc' + BLANKS_IGNORE_LIST = '.*/generated/.*' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. - TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' + TESTS_FILTER = 'checkstyle,javac,javadoc,pylint,shellcheck,shelldocs,blanks,perlcritic,ruby-lint,rubocop' EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/excludes" // TODO does hadoopcheck need to be jdk specific? SHALLOW_CHECKS = 'all,-shadedjars,-unit' // run by the 'yetus general check' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index 62b16287e1e4..2ea1ef697b4e 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -36,14 +36,16 @@ pipeline { YETUS_REL = 'yetus' DOCKERFILE_REL = "${SRC_REL}/dev-support/docker/Dockerfile" YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" - // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION = 'rel/0.12.0' + YETUS_VERSION = '0.15.0' GENERAL_CHECK_PLUGINS = 'all,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' + // This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag. + AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc' + BLANKS_IGNORE_LIST = '.*/generated/.*' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. 
- TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' + TESTS_FILTER = 'checkstyle,javac,javadoc,pylint,shellcheck,shelldocs,blanks,perlcritic,ruby-lint,rubocop' EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/output/excludes" // set build parallel BUILD_THREAD = 4 @@ -102,11 +104,11 @@ pipeline { checkout scm } dir("${YETUSDIR}") { - checkout([ - $class : 'GitSCM', - branches : [[name: "${YETUS_VERSION}"]], - userRemoteConfigs: [[url: 'https://github.com/apache/yetus.git']]] - ) + sh'''#!/usr/bin/env bash + wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + rm apache-yetus-${YETUS_VERSION}-bin.tar.gz + ''' } dir("${WORKDIR}") { withCredentials([ @@ -229,11 +231,11 @@ pipeline { checkout scm } dir("${YETUSDIR}") { - checkout([ - $class : 'GitSCM', - branches : [[name: "${YETUS_VERSION}"]], - userRemoteConfigs: [[url: 'https://github.com/apache/yetus.git']]] - ) + sh'''#!/usr/bin/env bash + wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + rm apache-yetus-${YETUS_VERSION}-bin.tar.gz + ''' } dir("${WORKDIR}") { withCredentials([ @@ -365,11 +367,11 @@ pipeline { checkout scm } dir("${YETUSDIR}") { - checkout([ - $class : 'GitSCM', - branches : [[name: "${YETUS_VERSION}"]], - userRemoteConfigs: [[url: 'https://github.com/apache/yetus.git']]] - ) + sh'''#!/usr/bin/env bash + wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + rm apache-yetus-${YETUS_VERSION}-bin.tar.gz + ''' } dir("${WORKDIR}") { withCredentials([ @@ -493,22 +495,19 @@ pipeline { SKIP_ERRORPRONE = true } when { - allOf { - // this will return true if the pipeline is building a change request, such as a GitHub pull request. - changeRequest() - expression { env.CHANGE_TARGET in ['master', 'branch-3'] } - } + // this will return true if the pipeline is building a change request, such as a GitHub pull request. 
+ changeRequest() } steps { dir("${SOURCEDIR}") { checkout scm } dir("${YETUSDIR}") { - checkout([ - $class : 'GitSCM', - branches : [[name: "${YETUS_VERSION}"]], - userRemoteConfigs: [[url: 'https://github.com/apache/yetus.git']]] - ) + sh'''#!/usr/bin/env bash + wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ + rm apache-yetus-${YETUS_VERSION}-bin.tar.gz + ''' } dir("${WORKDIR}") { withCredentials([ diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index dcd84c89c218..499397b6313c 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -53,6 +53,8 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \ ruby=1:3.0* \ ruby-dev=1:3.0* \ shellcheck='0.8.0-*' \ + libxml2-dev='2.9.13+dfsg-*' \ + libxml2-utils='2.9.13+dfsg-*' \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* \ diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh index 4c671dcfef65..3423a7e73c55 100755 --- a/dev-support/hbase_nightly_yetus.sh +++ b/dev-support/hbase_nightly_yetus.sh @@ -20,7 +20,7 @@ declare -i missing_env=0 # Validate params for required_env in "TESTS" "PERSONALITY_FILE" "BASEDIR" "ARCHIVE_PATTERN_LIST" "OUTPUT_DIR_RELATIVE" \ "OUTPUT_DIR" "PROJECT" "AUTHOR_IGNORE_LIST" \ - "WHITESPACE_IGNORE_LIST" "BRANCH_NAME" "TESTS_FILTER" "DEBUG" \ + "BLANKS_IGNORE_LIST" "BRANCH_NAME" "TESTS_FILTER" "DEBUG" \ "USE_YETUS_PRERELEASE" "WORKSPACE" "YETUS_RELEASE"; do if [ -z "${!required_env}" ]; then echo "[ERROR] Required environment variable '${required_env}' is not set." @@ -59,8 +59,8 @@ YETUS_ARGS=("--patch-dir=${OUTPUT_DIR}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--project=${PROJECT}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--resetrepo" "${YETUS_ARGS[@]}") YETUS_ARGS=("--author-ignore-list=${AUTHOR_IGNORE_LIST}" "${YETUS_ARGS[@]}") -YETUS_ARGS=("--whitespace-eol-ignore-list=${WHITESPACE_IGNORE_LIST}" "${YETUS_ARGS[@]}") -YETUS_ARGS=("--whitespace-tabs-ignore-list=${WHITESPACE_IGNORE_LIST}" "${YETUS_ARGS[@]}") +YETUS_ARGS=("--blanks-eol-ignore-list=${BLANKS_IGNORE_LIST}" "${YETUS_ARGS[@]}") +YETUS_ARGS=("--blanks-tabs-ignore-list=${BLANKS_IGNORE_LIST}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--sentinel" "${YETUS_ARGS[@]}") YETUS_ARGS=("--branch=${BRANCH_NAME}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--tests-filter=${TESTS_FILTER}" "${YETUS_ARGS[@]}") @@ -106,11 +106,9 @@ if [[ -n "${JAVA8_HOME}" ]]; then fi if [[ true != "${USE_YETUS_PRERELEASE}" ]]; then - YETUS_ARGS=("--shelldocs=${WORKSPACE}/yetus-${YETUS_RELEASE}/bin/shelldocs" "${YETUS_ARGS[@]}") TESTPATCHBIN="${WORKSPACE}/yetus-${YETUS_RELEASE}/bin/test-patch" else - YETUS_ARGS=("--shelldocs=${WORKSPACE}/yetus-git/shelldocs/shelldocs.py" "${YETUS_ARGS[@]}") - TESTPATCHBIN="${WORKSPACE}/yetus-git/precommit/test-patch.sh" + TESTPATCHBIN="${WORKSPACE}/yetus-git/precommit/src/main/shell/test-patch.sh" fi echo "Launching yetus with command line:" echo "${TESTPATCHBIN} ${YETUS_ARGS[*]}" diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 0f5de550f8e6..845d4ab4e245 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -41,6 +41,8 @@ declare -a required_envs=( "SOURCEDIR" "TESTS_FILTER" "YETUSDIR" + "AUTHOR_IGNORE_LIST" + "BLANKS_IGNORE_LIST" ) # Validate params for required_env in "${required_envs[@]}"; do @@ -57,7 +59,7 @@ if [ ${missing_env} -gt 0 ]; then fi # 
TODO (HBASE-23900): cannot assume test-patch runs directly from sources -TESTPATCHBIN="${YETUSDIR}/precommit/src/main/shell/test-patch.sh" +TESTPATCHBIN="${YETUSDIR}/bin/test-patch" # this must be clean for every run rm -rf "${PATCHDIR}" @@ -87,8 +89,8 @@ YETUS_ARGS+=("--brief-report-file=${PATCHDIR}/brief.txt") YETUS_ARGS+=("--console-report-file=${PATCHDIR}/console.txt") YETUS_ARGS+=("--html-report-file=${PATCHDIR}/report.html") # enable writing back to Github -YETUS_ARGS+=("--github-password=${GITHUB_PASSWORD}") -YETUS_ARGS+=("--github-user=${GITHUB_USER}") +YETUS_ARGS+=("--github-token=${GITHUB_PASSWORD}") +YETUS_ARGS+=("--github-write-comment") # auto-kill any surefire stragglers during unit test runs YETUS_ARGS+=("--reapermode=kill") # set relatively high limits for ASF machines @@ -109,8 +111,9 @@ YETUS_ARGS+=("--docker") YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") YETUS_ARGS+=("--mvn-custom-repos") YETUS_ARGS+=("--java-home=${SET_JAVA_HOME}") -YETUS_ARGS+=("--whitespace-eol-ignore-list=.*/generated/.*") -YETUS_ARGS+=("--whitespace-tabs-ignore-list=.*/generated/.*") +YETUS_ARGS+=("--author-ignore-list=${AUTHOR_IGNORE_LIST}") +YETUS_ARGS+=("--blanks-eol-ignore-list=${BLANKS_IGNORE_LIST}") +YETUS_ARGS+=("--blanks-tabs-ignore-list=${BLANKS_IGNORE_LIST}*") YETUS_ARGS+=("--tests-filter=${TESTS_FILTER}") YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/hbase-personality.sh") YETUS_ARGS+=("--quick-hadoopcheck") From e0ada25d001214a54005c1fdf7910c23889fa4f3 Mon Sep 17 00:00:00 2001 From: lupeng Date: Sat, 22 Jun 2024 11:32:45 +0800 Subject: [PATCH 430/514] HBASE-28658 The failsafe snapshot should be deleted after rollback successfully (#5984) Signed-off-by: Duo Zhang --- .../hbase/client/RawAsyncHBaseAdmin.java | 19 +++++-- .../hbase/client/SnapshotWithAclTestBase.java | 50 ++++++++++++++++--- .../hbase/client/TestSnapshotWithAcl.java | 5 +- .../client/TestSnapshotWithAclAsyncAdmin.java | 5 +- 4 files changed, 64 insertions(+), 15 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 634f54faa63d..232aa3e8b635 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2181,10 +2181,21 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t if (err3 != null) { future.completeExceptionally(err3); } else { - String msg = - "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" - + failSafeSnapshotSnapshotName + " succeeded."; - future.completeExceptionally(new RestoreSnapshotException(msg, err2)); + // If fail to restore snapshot but rollback successfully, delete the + // restore-failsafe snapshot. 
+ LOG.info( + "Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); + addListener(deleteSnapshot(failSafeSnapshotSnapshotName), (ret4, err4) -> { + if (err4 != null) { + LOG.error("Unable to remove the failsafe snapshot: {}", + failSafeSnapshotSnapshotName, err4); + } + String msg = + "Restore snapshot=" + snapshotName + " failed, Rollback to snapshot=" + + failSafeSnapshotSnapshotName + " succeeded."; + LOG.error(msg); + future.completeExceptionally(new RestoreSnapshotException(msg, err2)); + }); } }); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java index 752a9ba92f99..5e37901840b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java @@ -17,13 +17,20 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; + import java.io.IOException; import java.util.List; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -33,6 +40,8 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.PermissionStorage; import org.apache.hadoop.hbase.security.access.SecureTestUtil; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Assert; @@ -110,6 +119,8 @@ public static void setupBeforeClass() throws Exception { verifyConfiguration(conf); // Enable EXEC permission checking conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); + TEST_UTIL.getConfiguration().set(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME, + "hbase-failsafe-{snapshot.name}-{restore.timestamp}"); TEST_UTIL.startMiniCluster(); TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME); MasterCoprocessorHost cpHost = @@ -168,7 +179,7 @@ private void verifyRows(TableName tableName) throws IOException { byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER); Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++)); } - Assert.assertEquals(ROW_COUNT, rowCount); + assertEquals(ROW_COUNT, rowCount); } } @@ -177,7 +188,8 @@ private void verifyRows(TableName tableName) throws IOException { protected abstract void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) throws Exception; - protected abstract void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception; + protected abstract void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, + boolean restoreAcl) throws Exception; @Test public void testRestoreSnapshot() throws Exception { @@ -220,7 +232,7 @@ public void testRestoreSnapshot() throws Exception { // restore snapshot with restoreAcl false. 
TEST_UTIL.getAdmin().disableTable(TEST_TABLE); - restoreSnapshot(snapshotName1, false); + restoreSnapshot(snapshotName1, false, false); TEST_UTIL.getAdmin().enableTable(TEST_TABLE); verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW); verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE); @@ -229,12 +241,36 @@ public void testRestoreSnapshot() throws Exception { // restore snapshot with restoreAcl true. TEST_UTIL.getAdmin().disableTable(TEST_TABLE); - restoreSnapshot(snapshotName1, true); + restoreSnapshot(snapshotName1, false, true); TEST_UTIL.getAdmin().enableTable(TEST_TABLE); verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW); verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE); verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW); verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE); + + // Delete data.manifest of the snapshot to simulate an invalid snapshot. + Configuration configuration = TEST_UTIL.getConfiguration(); + Path rootDir = new Path(configuration.get(HConstants.HBASE_DIR)); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName1, rootDir); + FileSystem fileSystem = FileSystem.get(rootDir.toUri(), configuration); + Path maniFestPath = new Path(snapshotDir, SnapshotManifest.DATA_MANIFEST_NAME); + fileSystem.delete(maniFestPath, false); + assertFalse(fileSystem.exists(maniFestPath)); + assertEquals(1, TEST_UTIL.getAdmin().listSnapshots(Pattern.compile(snapshotName1)).size()); + // There is no failsafe snapshot before restoring. + int failsafeSnapshotCount = TEST_UTIL.getAdmin() + .listSnapshots(Pattern.compile("hbase-failsafe-" + snapshotName1 + ".*")).size(); + assertEquals(0, failsafeSnapshotCount); + TEST_UTIL.getAdmin().disableTable(TEST_TABLE); + // We would get Exception when restoring data by this an invalid snapshot. + assertThrows(Exception.class, () -> restoreSnapshot(snapshotName1, true, true)); + TEST_UTIL.getAdmin().enableTable(TEST_TABLE); + verifyRows(TEST_TABLE); + // Fail to store snapshot but rollback successfully, so there is no failsafe snapshot after + // restoring. 
+ failsafeSnapshotCount = TEST_UTIL.getAdmin() + .listSnapshots(Pattern.compile("hbase-failsafe-" + snapshotName1 + ".*")).size(); + assertEquals(0, failsafeSnapshotCount); } final class AccessSnapshotAction implements AccessTestAction { @@ -262,8 +298,8 @@ public void testDeleteSnapshot() throws Exception { USER_RO, USER_RW, USER_NONE); List snapshotDescriptions = TEST_UTIL.getAdmin().listSnapshots(Pattern.compile(testSnapshotName)); - Assert.assertEquals(1, snapshotDescriptions.size()); - Assert.assertEquals(USER_OWNER.getShortName(), snapshotDescriptions.get(0).getOwner()); + assertEquals(1, snapshotDescriptions.size()); + assertEquals(USER_OWNER.getShortName(), snapshotDescriptions.get(0).getOwner()); AccessTestAction deleteSnapshotAction = () -> { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = conn.getAdmin()) { @@ -276,6 +312,6 @@ public void testDeleteSnapshot() throws Exception { List snapshotsAfterDelete = TEST_UTIL.getAdmin().listSnapshots(Pattern.compile(testSnapshotName)); - Assert.assertEquals(0, snapshotsAfterDelete.size()); + assertEquals(0, snapshotsAfterDelete.size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java index f8d0a7950628..411076dcf047 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java @@ -43,7 +43,8 @@ protected void cloneSnapshot(String snapshotName, TableName tableName, boolean r } @Override - protected void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception { - TEST_UTIL.getAdmin().restoreSnapshot(snapshotName, false, restoreAcl); + protected void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, + boolean restoreAcl) throws Exception { + TEST_UTIL.getAdmin().restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java index d20b73540c9f..7c84f645ea73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java @@ -49,10 +49,11 @@ protected void cloneSnapshot(String snapshotName, TableName tableName, boolean r } @Override - protected void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception { + protected void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, + boolean restoreAcl) throws Exception { try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { - conn.getAdmin().restoreSnapshot(snapshotName, false, restoreAcl).get(); + conn.getAdmin().restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl).get(); } } } From d1015a68ed9f94d74668abd37edefd32f5e9305b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 23 Jun 2024 22:57:29 +0800 Subject: [PATCH 431/514] HBASE-28688 Correct the usage for blanks ignore options in yetus (#6026) Signed-off-by: Guanghao Zhang --- dev-support/Jenkinsfile | 3 +- dev-support/Jenkinsfile_GitHub | 3 +- dev-support/blanks-eol-ignore.txt | 24 ++++++++++++++++ dev-support/blanks-tabs-ignore.txt | 28 +++++++++++++++++++ 
dev-support/hbase_nightly_yetus.sh | 8 +++--- dev-support/jenkins_precommit_github_yetus.sh | 7 +++-- 6 files changed, 64 insertions(+), 9 deletions(-) create mode 100644 dev-support/blanks-eol-ignore.txt create mode 100644 dev-support/blanks-tabs-ignore.txt diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 227229bf8d84..3aadf7316b32 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -44,7 +44,8 @@ pipeline { PERSONALITY_FILE = 'tools/personality.sh' // This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag. AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc' - BLANKS_IGNORE_LIST = '.*/generated/.*' + BLANKS_EOL_IGNORE_FILE = 'dev-support/blanks-eol-ignore.txt' + BLANKS_TABS_IGNORE_FILE = 'dev-support/blanks-tabs-ignore.txt' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index 2ea1ef697b4e..b8d85ad5e131 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -41,7 +41,8 @@ pipeline { JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag. AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc' - BLANKS_IGNORE_LIST = '.*/generated/.*' + BLANKS_EOL_IGNORE_FILE = 'dev-support/blanks-eol-ignore.txt' + BLANKS_TABS_IGNORE_FILE = 'dev-support/blanks-tabs-ignore.txt' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. diff --git a/dev-support/blanks-eol-ignore.txt b/dev-support/blanks-eol-ignore.txt new file mode 100644 index 000000000000..6912be308371 --- /dev/null +++ b/dev-support/blanks-eol-ignore.txt @@ -0,0 +1,24 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## +.*/generated/.* +# we have generated code for other languages in hbase-examples +.*/gen-cpp/.* +.*/gen-perl/.* +.*/gen-php/.* +.*/gen-py/.* +.*/gen-rb/.* diff --git a/dev-support/blanks-tabs-ignore.txt b/dev-support/blanks-tabs-ignore.txt new file mode 100644 index 000000000000..49185487846e --- /dev/null +++ b/dev-support/blanks-tabs-ignore.txt @@ -0,0 +1,28 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## +.*/generated/.* +# we have generated code for other languages in hbase-examples +.*/gen-cpp/.* +.*/gen-perl/.* +.*/gen-php/.* +.*/gen-py/.* +.*/gen-rb/.* +# we have tabs in asciidoc, not sure whether it is OK to replace them with spaces +src/main/asciidoc/.* +# perl officially suggests use tab instead of space for indentation +.*/*.pl diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh index 3423a7e73c55..4cee1efde560 100755 --- a/dev-support/hbase_nightly_yetus.sh +++ b/dev-support/hbase_nightly_yetus.sh @@ -20,8 +20,8 @@ declare -i missing_env=0 # Validate params for required_env in "TESTS" "PERSONALITY_FILE" "BASEDIR" "ARCHIVE_PATTERN_LIST" "OUTPUT_DIR_RELATIVE" \ "OUTPUT_DIR" "PROJECT" "AUTHOR_IGNORE_LIST" \ - "BLANKS_IGNORE_LIST" "BRANCH_NAME" "TESTS_FILTER" "DEBUG" \ - "USE_YETUS_PRERELEASE" "WORKSPACE" "YETUS_RELEASE"; do + "BLANKS_EOL_IGNORE_FILE" "BLANKS_TABS_IGNORE_FILE" "BRANCH_NAME" "TESTS_FILTER" \ + "DEBUG" "USE_YETUS_PRERELEASE" "WORKSPACE" "YETUS_RELEASE"; do if [ -z "${!required_env}" ]; then echo "[ERROR] Required environment variable '${required_env}' is not set." 
missing_env=${missing_env}+1 @@ -59,8 +59,8 @@ YETUS_ARGS=("--patch-dir=${OUTPUT_DIR}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--project=${PROJECT}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--resetrepo" "${YETUS_ARGS[@]}") YETUS_ARGS=("--author-ignore-list=${AUTHOR_IGNORE_LIST}" "${YETUS_ARGS[@]}") -YETUS_ARGS=("--blanks-eol-ignore-list=${BLANKS_IGNORE_LIST}" "${YETUS_ARGS[@]}") -YETUS_ARGS=("--blanks-tabs-ignore-list=${BLANKS_IGNORE_LIST}" "${YETUS_ARGS[@]}") +YETUS_ARGS=("--blanks-eol-ignore-file=${BLANKS_EOL_IGNORE_FILE}" "${YETUS_ARGS[@]}") +YETUS_ARGS=("--blanks-tabs-ignore-file=${BLANKS_TABS_IGNORE_FILE}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--sentinel" "${YETUS_ARGS[@]}") YETUS_ARGS=("--branch=${BRANCH_NAME}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--tests-filter=${TESTS_FILTER}" "${YETUS_ARGS[@]}") diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 845d4ab4e245..8604d96760dc 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -42,7 +42,8 @@ declare -a required_envs=( "TESTS_FILTER" "YETUSDIR" "AUTHOR_IGNORE_LIST" - "BLANKS_IGNORE_LIST" + "BLANKS_EOL_IGNORE_FILE" + "BLANKS_TABS_IGNORE_FILE" ) # Validate params for required_env in "${required_envs[@]}"; do @@ -112,8 +113,8 @@ YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") YETUS_ARGS+=("--mvn-custom-repos") YETUS_ARGS+=("--java-home=${SET_JAVA_HOME}") YETUS_ARGS+=("--author-ignore-list=${AUTHOR_IGNORE_LIST}") -YETUS_ARGS+=("--blanks-eol-ignore-list=${BLANKS_IGNORE_LIST}") -YETUS_ARGS+=("--blanks-tabs-ignore-list=${BLANKS_IGNORE_LIST}*") +YETUS_ARGS+=("--blanks-eol-ignore-file=${BLANKS_EOL_IGNORE_FILE}") +YETUS_ARGS+=("--blanks-tabs-ignore-file=${BLANKS_TABS_IGNORE_FILE}*") YETUS_ARGS+=("--tests-filter=${TESTS_FILTER}") YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/hbase-personality.sh") YETUS_ARGS+=("--quick-hadoopcheck") From 7e8fd07209562cd3974387ed1b942e9b892273a8 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Wed, 26 Jun 2024 08:40:12 -0400 Subject: [PATCH 432/514] HBASE-28687 BackupSystemTable#checkSystemTable should ensure system tables are enabled (#6018) Co-authored-by: Ray Mattingly Signed-off-by: Bryan Beaudreault Signed-off-by: Nick Dimiduk --- .../hbase/backup/impl/BackupSystemTable.java | 13 +++++++ .../hadoop/hbase/backup/TestBackupBase.java | 37 ++++++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index c364316d54eb..5a12b45a5861 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; @@ -207,10 +208,12 @@ private void checkSystemTable() throws IOException { TableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(conf); createSystemTable(admin, backupHTD); } + ensureTableEnabled(admin, tableName); if (!admin.tableExists(bulkLoadTableName)) { TableDescriptor blHTD = 
BackupSystemTable.getSystemTableForBulkLoadedDataDescriptor(conf); createSystemTable(admin, blHTD); } + ensureTableEnabled(admin, bulkLoadTableName); waitForSystemTable(admin, tableName); waitForSystemTable(admin, bulkLoadTableName); } @@ -1889,4 +1892,14 @@ private static byte[] rowkey(String s, String... other) { } return Bytes.toBytes(sb.toString()); } + + private static void ensureTableEnabled(Admin admin, TableName tableName) throws IOException { + if (!admin.isTableEnabled(tableName)) { + try { + admin.enableTable(tableName); + } catch (TableNotDisabledException ignored) { + LOG.info("Table {} is not disabled, ignoring enable request", tableName); + } + } + } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index e9c1cfd9c323..ed17ef8a1173 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner; +import org.apache.hadoop.hbase.regionserver.LogRoller; import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.SecureTestUtil; @@ -67,6 +69,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALFactory; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,6 +118,38 @@ public IncrementalTableBackupClientForTest(Connection conn, String backupId, super(conn, backupId, request); } + @Before + public void ensurePreviousBackupTestsAreCleanedUp() throws Exception { + // Every operation here may not be necessary for any given test, + // some often being no-ops. 
the goal is to help ensure atomicity + // of that tests that implement TestBackupBase + try (BackupAdmin backupAdmin = getBackupAdmin()) { + backupManager.finishBackupSession(); + backupAdmin.listBackupSets().forEach(backupSet -> { + try { + backupAdmin.deleteBackupSet(backupSet.getName()); + } catch (IOException ignored) { + } + }); + } catch (Exception ignored) { + } + Arrays.stream(TEST_UTIL.getAdmin().listTableNames()) + .filter(tableName -> !tableName.isSystemTable()).forEach(tableName -> { + try { + TEST_UTIL.truncateTable(tableName); + } catch (IOException ignored) { + } + }); + TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> { + try { + LogRoller walRoller = rst.getRegionServer().getWalRoller(); + walRoller.requestRollAll(); + walRoller.waitUntilWalRollFinished(); + } catch (Exception ignored) { + } + }); + } + @Override public void execute() throws IOException { // case INCREMENTAL_COPY: @@ -468,7 +503,7 @@ private BackupInfo getBackupInfo(String backupId) throws IOException { } } - protected BackupAdmin getBackupAdmin() throws IOException { + protected static BackupAdmin getBackupAdmin() throws IOException { return new BackupAdminImpl(TEST_UTIL.getConnection()); } From 80516d712912234be9239435741642f4f724309d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 26 Jun 2024 22:31:38 +0800 Subject: [PATCH 433/514] HBASE-28676 Make pre commit build for 3.x to only run jdk 17 test (#6001) Signed-off-by: Nick Dimiduk --- dev-support/Jenkinsfile_GitHub | 277 +----------------------------- hbase-build-configuration/pom.xml | 11 ++ pom.xml | 2 +- 3 files changed, 13 insertions(+), 277 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index b8d85ad5e131..0813168e7f64 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -56,8 +56,6 @@ pipeline { // stage works in its own subdirectory. there is an "output" under each of these // directories, which we retrieve after the build is complete. WORKDIR_REL_GENERAL_CHECK = 'yetus-general-check' - WORKDIR_REL_JDK8_HADOOP3_CHECK = 'yetus-jdk8-hadoop3-check' - WORKDIR_REL_JDK11_HADOOP3_CHECK = 'yetus-jdk11-hadoop3-check' WORKDIR_REL_JDK17_HADOOP3_CHECK = 'yetus-jdk17-hadoop3-check' ASF_NIGHTLIES = 'https://nightlies.apache.org' ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" @@ -82,8 +80,7 @@ pipeline { environment { // customized per parallel stage PLUGINS = "${GENERAL_CHECK_PLUGINS}" - SET_JAVA_HOME = "/usr/lib/jvm/java-11" - JAVA8_HOME = "/usr/lib/jvm/java-8" + SET_JAVA_HOME = "/usr/lib/jvm/java-17" HADOOP_PROFILE = '3.0' WORKDIR_REL = "${WORKDIR_REL_GENERAL_CHECK}" // identical for all parallel stages @@ -202,278 +199,6 @@ pipeline { } } } - stage ('yetus jdk8 Hadoop3 checks') { - agent { - node { - label 'hbase' - } - } - environment { - // customized per parallel stage - PLUGINS = "${JDK_SPECIFIC_PLUGINS}" - SET_JAVA_HOME = '/usr/lib/jvm/java-8' - WORKDIR_REL = "${WORKDIR_REL_JDK8_HADOOP3_CHECK}" - // identical for all parallel stages - WORKDIR = "${WORKSPACE}/${WORKDIR_REL}" - YETUSDIR = "${WORKDIR}/${YETUS_REL}" - SOURCEDIR = "${WORKDIR}/${SRC_REL}" - PATCHDIR = "${WORKDIR}/${PATCH_REL}" - BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" - DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" - YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" - SKIP_ERRORPRONE = true - } - when { - // this will return true if the pipeline is building a change request, such as a GitHub pull request. 
- changeRequest() - } - steps { - dir("${SOURCEDIR}") { - checkout scm - } - dir("${YETUSDIR}") { - sh'''#!/usr/bin/env bash - wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ - tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ - rm apache-yetus-${YETUS_VERSION}-bin.tar.gz - ''' - } - dir("${WORKDIR}") { - withCredentials([ - usernamePassword( - credentialsId: 'apache-hbase-at-github.com', - passwordVariable: 'GITHUB_PASSWORD', - usernameVariable: 'GITHUB_USER' - )]) { - script { - def ret = sh( - label: 'test-patch', - returnStatus: true, - script: '''#!/bin/bash -e - hostname -a ; pwd ; ls -la - printenv 2>&1 | sort - echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" - "${YETUS_DRIVER}" - ''' - ) - if (ret != 0) { - // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of - // test output. See HBASE-26339 for more details. - currentBuild.result = 'UNSTABLE' - } - } - } - } - } - post { - always { - junit testResults: "${WORKDIR_REL}/${SRC_REL}/**/target/**/TEST-*.xml", - allowEmptyResults: true, skipPublishingChecks: true - sh label: 'zip surefire reports', script: '''#!/bin/bash -e - if [ -d "${PATCHDIR}/archiver" ]; then - count=$(find "${PATCHDIR}/archiver" -type f | wc -l) - if [[ 0 -ne ${count} ]]; then - echo "zipping ${count} archived files" - zip -q -m -r "${PATCHDIR}/test_logs.zip" "${PATCHDIR}/archiver" - else - echo "No archived files, skipping compressing." - fi - else - echo "No archiver directory, skipping compressing." - fi - ''' - sshPublisher(publishers: [ - sshPublisherDesc(configName: 'Nightlies', - transfers: [ - sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", - sourceFiles: "${env.WORKDIR_REL}/${env.PATCH_REL}/test_logs.zip" - ) - ] - ) - ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt - sh '''#!/bin/bash -e - if [ -f "${PATCHDIR}/test_logs.zip" ]; then - echo "Remove ${PATCHDIR}/test_logs.zip for saving space" - rm -rf "${PATCHDIR}/test_logs.zip" - python3 ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" > "${PATCHDIR}/test_logs.html" - else - echo "No test_logs.zip, skipping" - fi - ''' - // Has to be relative to WORKSPACE. 
- archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit" - archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/**/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit/**/*" - publishHTML target: [ - allowMissing: true, - keepAll: true, - alwaysLinkToLastBuild: true, - // Has to be relative to WORKSPACE - reportDir: "${WORKDIR_REL}/${PATCH_REL}", - reportFiles: 'report.html', - reportName: 'PR JDK8 Hadoop3 Check Report' - ] - } - // Jenkins pipeline jobs fill slaves on PRs without this :( - cleanup() { - script { - sh label: 'Cleanup workspace', script: '''#!/bin/bash -e - # See YETUS-764 - if [ -f "${PATCHDIR}/pidfile.txt" ]; then - echo "test-patch process appears to still be running: killing" - kill `cat "${PATCHDIR}/pidfile.txt"` || true - sleep 10 - fi - if [ -f "${PATCHDIR}/cidfile.txt" ]; then - echo "test-patch container appears to still be running: killing" - docker kill `cat "${PATCHDIR}/cidfile.txt"` || true - fi - # See HADOOP-13951 - chmod -R u+rxw "${WORKSPACE}" - ''' - dir ("${WORKDIR}") { - deleteDir() - } - } - } - } - } - stage ('yetus jdk11 hadoop3 checks') { - agent { - node { - label 'hbase' - } - } - environment { - // customized per parallel stage - PLUGINS = "${JDK_SPECIFIC_PLUGINS}" - SET_JAVA_HOME = '/usr/lib/jvm/java-11' - WORKDIR_REL = "${WORKDIR_REL_JDK11_HADOOP3_CHECK}" - // identical for all parallel stages - WORKDIR = "${WORKSPACE}/${WORKDIR_REL}" - YETUSDIR = "${WORKDIR}/${YETUS_REL}" - SOURCEDIR = "${WORKDIR}/${SRC_REL}" - PATCHDIR = "${WORKDIR}/${PATCH_REL}" - BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" - DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" - YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" - SKIP_ERRORPRONE = true - } - when { - // this will return true if the pipeline is building a change request, such as a GitHub pull request. - changeRequest() - } - steps { - dir("${SOURCEDIR}") { - checkout scm - } - dir("${YETUSDIR}") { - sh'''#!/usr/bin/env bash - wget https://dlcdn.apache.org/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ - tar --strip-components=1 -xzf apache-yetus-${YETUS_VERSION}-bin.tar.gz && \ - rm apache-yetus-${YETUS_VERSION}-bin.tar.gz - ''' - } - dir("${WORKDIR}") { - withCredentials([ - usernamePassword( - credentialsId: 'apache-hbase-at-github.com', - passwordVariable: 'GITHUB_PASSWORD', - usernameVariable: 'GITHUB_USER' - )]) { - script { - def ret = sh( - label: 'test-patch', - returnStatus: true, - script: '''#!/bin/bash -e - hostname -a ; pwd ; ls -la - printenv 2>&1 | sort - echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" - "${YETUS_DRIVER}" - ''' - ) - if (ret != 0) { - // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of - // test output. See HBASE-26339 for more details. - currentBuild.result = 'UNSTABLE' - } - } - } - } - } - post { - always { - junit testResults: "${WORKDIR_REL}/${SRC_REL}/**/target/**/TEST-*.xml", - allowEmptyResults: true, skipPublishingChecks: true - sh label: 'zip surefire reports', script: '''#!/bin/bash -e - if [ -d "${PATCHDIR}/archiver" ]; then - count=$(find "${PATCHDIR}/archiver" -type f | wc -l) - if [[ 0 -ne ${count} ]]; then - echo "zipping ${count} archived files" - zip -q -m -r "${PATCHDIR}/test_logs.zip" "${PATCHDIR}/archiver" - else - echo "No archived files, skipping compressing." - fi - else - echo "No archiver directory, skipping compressing." 
- fi - ''' - sshPublisher(publishers: [ - sshPublisherDesc(configName: 'Nightlies', - transfers: [ - sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", - sourceFiles: "${env.WORKDIR_REL}/${env.PATCH_REL}/test_logs.zip" - ) - ] - ) - ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt - sh '''#!/bin/bash -e - if [ -f "${PATCHDIR}/test_logs.zip" ]; then - echo "Remove ${PATCHDIR}/test_logs.zip for saving space" - rm -rf "${PATCHDIR}/test_logs.zip" - python3 ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" > "${PATCHDIR}/test_logs.html" - else - echo "No test_logs.zip, skipping" - fi - ''' - // Has to be relative to WORKSPACE. - archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit" - archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/**/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit/**/*" - publishHTML target: [ - allowMissing: true, - keepAll: true, - alwaysLinkToLastBuild: true, - // Has to be relative to WORKSPACE - reportDir: "${WORKDIR_REL}/${PATCH_REL}", - reportFiles: 'report.html', - reportName: 'PR JDK11 Hadoop3 Check Report' - ] - } - // Jenkins pipeline jobs fill slaves on PRs without this :( - cleanup() { - script { - sh label: 'Cleanup workspace', script: '''#!/bin/bash -e - # See YETUS-764 - if [ -f "${PATCHDIR}/pidfile.txt" ]; then - echo "test-patch process appears to still be running: killing" - kill `cat "${PATCHDIR}/pidfile.txt"` || true - sleep 10 - fi - if [ -f "${PATCHDIR}/cidfile.txt" ]; then - echo "test-patch container appears to still be running: killing" - docker kill `cat "${PATCHDIR}/cidfile.txt"` || true - fi - # See HADOOP-13951 - chmod -R u+rxw "${WORKSPACE}" - ''' - dir ("${WORKDIR}") { - deleteDir() - } - } - } - } - } stage ('yetus jdk17 hadoop3 checks') { agent { node { diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index c3bdfe738900..4bfe31242364 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -77,7 +77,18 @@ ${releaseTarget} true + true + -J--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED + -J--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED + -J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED + -J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED -XDcompilePolicy=simple -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -XepExcludedPaths:.*/target/.* -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR -Xep:BanJNDI:WARN diff --git a/pom.xml b/pom.xml index 59a1c95b9056..f71bfbf96915 100644 --- a/pom.xml +++ b/pom.xml @@ -809,7 +809,7 @@ ${project.build.finalName}.tar.gz yyyy-MM-dd'T'HH:mm ${maven.build.timestamp} - 1.8 + 17 8 From b948b06c9601c394c65a640b7448e15637ea17b2 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 27 Jun 2024 17:12:11 +0800 Subject: [PATCH 434/514] HBASE-28693 Change flaky tests to run with jdk17 (#6031) Signed-off-by: Xin Sun --- dev-support/docker/Dockerfile | 32 
+++++++++---------- .../flaky-tests/run-flaky-tests.Jenkinsfile | 19 +++++------ 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 499397b6313c..06e4b67526f8 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -75,46 +75,46 @@ ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 # FROM base_image AS spotbugs_download_image -ENV SPOTBUGS_VERSION '4.7.3' -ENV SPOTBUGS_URL "https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/${SPOTBUGS_VERSION}/spotbugs-${SPOTBUGS_VERSION}.tgz" -ENV SPOTBUGS_SHA512 '09a9fe0e5a6ec8e9d6d116c361b5c34c9d0560c0271241f02fadee911952adfcd69dc184f6de1cc4d4a8fe2c84c162689ea9a691dcae0779935eedf390fcc4ad' +ENV SPOTBUGS_VERSION='4.7.3' +ENV SPOTBUGS_URL="https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/${SPOTBUGS_VERSION}/spotbugs-${SPOTBUGS_VERSION}.tgz" +ENV SPOTBUGS_SHA512='09a9fe0e5a6ec8e9d6d116c361b5c34c9d0560c0271241f02fadee911952adfcd69dc184f6de1cc4d4a8fe2c84c162689ea9a691dcae0779935eedf390fcc4ad' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/spotbugs.tgz "${SPOTBUGS_URL}" && \ echo "${SPOTBUGS_SHA512} */tmp/spotbugs.tgz" | sha512sum -c - FROM base_image AS hadolint_download_image -ENV HADOLINT_VERSION '2.10.0' -ENV HADOLINT_URL "https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-Linux-x86_64" -ENV HADOLINT_SHA512 '4816c95243bedf15476d2225f487fc17465495fb2031e1a4797d82a26db83a1edb63e4fed084b80cef17d5eb67eb45508caadaf7cd0252fb061187113991a338' +ENV HADOLINT_VERSION='2.10.0' +ENV HADOLINT_URL="https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-Linux-x86_64" +ENV HADOLINT_SHA512='4816c95243bedf15476d2225f487fc17465495fb2031e1a4797d82a26db83a1edb63e4fed084b80cef17d5eb67eb45508caadaf7cd0252fb061187113991a338' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/hadolint "${HADOLINT_URL}" && \ echo "${HADOLINT_SHA512} */tmp/hadolint" | sha512sum -c - FROM base_image AS maven_download_image ENV MAVEN_VERSION='3.8.6' -ENV MAVEN_URL "https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz" -ENV MAVEN_SHA512 'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26' +ENV MAVEN_URL="https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz" +ENV MAVEN_SHA512='f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz "${MAVEN_URL}" && \ echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c - FROM base_image AS openjdk8_download_image -ENV OPENJDK8_URL 'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz' -ENV OPENJDK8_SHA256 '1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee' +ENV OPENJDK8_URL='https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz' +ENV OPENJDK8_SHA256='1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent 
--show-error --output /tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \ echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c - FROM base_image AS openjdk11_download_image -ENV OPENJDK11_URL 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.17%2B8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.17_8.tar.gz' -ENV OPENJDK11_SHA256 'b8d46ed08ef4859476fe6421a7690d899ed83dce63f13fd894f994043177ef3c' +ENV OPENJDK11_URL='https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.17%2B8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.17_8.tar.gz' +ENV OPENJDK11_SHA256='b8d46ed08ef4859476fe6421a7690d899ed83dce63f13fd894f994043177ef3c' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk11.tar.gz "${OPENJDK11_URL}" && \ echo "${OPENJDK11_SHA256} */tmp/adoptopenjdk11.tar.gz" | sha256sum -c - FROM base_image AS openjdk17_download_image -ENV OPENJDK17_URL 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.10%2B7/OpenJDK17U-jdk_x64_linux_hotspot_17.0.10_7.tar.gz' -ENV OPENJDK17_SHA256 'a8fd07e1e97352e97e330beb20f1c6b351ba064ca7878e974c7d68b8a5c1b378' +ENV OPENJDK17_URL='https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.10%2B7/OpenJDK17U-jdk_x64_linux_hotspot_17.0.10_7.tar.gz' +ENV OPENJDK17_SHA256='a8fd07e1e97352e97e330beb20f1c6b351ba064ca7878e974c7d68b8a5c1b378' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk17.tar.gz "${OPENJDK17_URL}" && \ echo "${OPENJDK17_SHA256} */tmp/adoptopenjdk17.tar.gz" | sha256sum -c - @@ -179,9 +179,7 @@ RUN mkdir -p /usr/lib/jvm && \ # these values to be specified here; the various --foo-path flags do not # propigate as expected, while these are honored. # TODO (nd): is this really true? investigate and file a ticket. -ENV SPOTBUGS_HOME '/opt/spotbugs' -ENV MAVEN_HOME '/opt/maven' -ENV MAVEN_OPTS '-Xmx3.6G' +ENV SPOTBUGS_HOME='/opt/spotbugs' MAVEN_HOME='/opt/maven' CMD ["/bin/bash"] diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index bea139c548a5..f8b49f9f5c9c 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -16,8 +16,10 @@ // under the License. 
pipeline { agent { - node { + dockerfile { + dir 'dev-support/docker' label 'hbase' + args '-v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro' } } triggers { @@ -31,20 +33,19 @@ pipeline { } environment { ASF_NIGHTLIES = 'https://nightlies.apache.org' + JAVA_HOME = '/usr/lib/jvm/java-17' } parameters { booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.') } - tools { - // this should match what the yetus nightly job for the branch will use - maven 'maven_latest' - jdk "jdk_1.8_latest" - } stages { stage ('run flaky tests') { steps { sh '''#!/usr/bin/env bash set -e + MVN="${MAVEN_HOME}/bin/mvn" + # print the maven version and java version + ${MVN} --version declare -a curl_args=(--fail) tmpdir=$(realpath target) declare -a mvn_args=(--batch-mode -fn -Dbuild.id="${BUILD_ID}" -Dmaven.repo.local="${WORKSPACE}/local-repository" -Djava.io.tmpdir=${tmpdir}) @@ -56,7 +57,7 @@ pipeline { curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/includes" if [ -s includes.txt ]; then rm -rf local-repository/org/apache/hbase - mvn clean "${mvn_args[@]}" + ${MVN} clean "${mvn_args[@]}" rm -rf "target/machine" && mkdir -p "target/machine" if [ -x dev-support/gather_machine_environment.sh ]; then "./dev-support/gather_machine_environment.sh" "target/machine" @@ -65,11 +66,11 @@ pipeline { else echo "Skipped gathering machine environment because we couldn't read the script to do so." fi - mvn -T0.25C package "${mvn_args[@]}" -Dtest="$(cat includes.txt)" -Dmaven.test.redirectTestOutputToFile=true -Dsurefire.firstPartForkCount=0.25C -Dsurefire.secondPartForkCount=0.25C + ${MVN} -T0.25C package "${mvn_args[@]}" -Dtest="$(cat includes.txt)" -Dmaven.test.redirectTestOutputToFile=true -Dsurefire.firstPartForkCount=0.25C -Dsurefire.secondPartForkCount=0.25C else echo "set of flaky tests is currently empty." 
fi -''' + ''' } } } From 2f340c774b49980dbe490473af4d04700de0869c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 27 Jun 2024 17:32:20 +0800 Subject: [PATCH 435/514] HBASE-28693 Addendum add hadoop 3.0 profile for branch-2.x (cherry picked from commit 98572e56ac6c079e6657d699e8185c97011d4951) --- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index f8b49f9f5c9c..ff5399549092 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -54,6 +54,10 @@ pipeline { mvn_args=("${mvn_args[@]}" -X) set -x fi + # need to build against hadoop 3.0 profile for branch-2 when using jdk 11+ + if [[ "${BRANCH_NAME}" == *"branch-2"* ]]; then + mvn_args=("${mvn_args[@]}" -Dhadoop.profile=3.0) + fi curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/includes" if [ -s includes.txt ]; then rm -rf local-repository/org/apache/hbase From 8ff8748a38d6b7d4725a41b5589d7c38bd36dae9 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 27 Jun 2024 21:05:53 +0800 Subject: [PATCH 436/514] HBASE-28678 Make nightly builds for 3.x java 17 only and add java 17 test for 2.x (#6032) Signed-off-by: Xin Sun --- dev-support/Jenkinsfile | 28 ++++++++++++++++++++-------- dev-support/hbase-personality.sh | 24 +++++------------------- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 3aadf7316b32..0a47726e53c2 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -205,8 +205,8 @@ pipeline { environment { BASEDIR = "${env.WORKSPACE}/component" TESTS = "${env.SHALLOW_CHECKS}" - SET_JAVA_HOME = "/usr/lib/jvm/java-11" - JAVA8_HOME="/usr/lib/jvm/java-8" + SET_JAVA_HOME = getJavaHomeForYetusGeneralCheck(env.BRANCH_NAME) + JAVA8_HOME = "/usr/lib/jvm/java-8" // Activates hadoop 3.0 profile in maven runs. 
HADOOP_PROFILE = '3.0' OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}" @@ -307,7 +307,7 @@ pipeline { } } when { - branch 'branch-2*' + branch '*branch-2*' } environment { BASEDIR = "${env.WORKSPACE}/component" @@ -417,6 +417,9 @@ pipeline { label 'hbase' } } + when { + branch '*branch-2*' + } environment { BASEDIR = "${env.WORKSPACE}/component" TESTS = "${env.DEEP_CHECKS}" @@ -527,6 +530,9 @@ pipeline { label 'hbase' } } + when { + branch '*branch-2*' + } environment { BASEDIR = "${env.WORKSPACE}/component" TESTS = "${env.DEEP_CHECKS}" @@ -633,11 +639,6 @@ pipeline { } stage ('yetus jdk17 hadoop3 checks') { - when { - anyOf { - branch 'master';branch 'branch-3' - } - } agent { node { label 'hbase' @@ -943,12 +944,14 @@ pipeline { unstash 'jdk8-hadoop2-result' unstash 'jdk8-hadoop3-result' unstash 'jdk11-hadoop3-result' + unstash 'jdk17-hadoop3-result' unstash 'srctarball-result' sh "printenv" def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile", "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/commentfile", "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP3}/commentfile", "${env.OUTPUT_DIR_RELATIVE_JDK11_HADOOP3}/commentfile", + "${env.OUTPUT_DIR_RELATIVE_JDK17_HADOOP3}/commentfile", 'output-srctarball/commentfile', 'output-integration/commentfile'] echo env.BRANCH_NAME @@ -1018,3 +1021,12 @@ List getJirasToComment(CharSequence source, List seen) { } return seen } +@NonCPS +String getJavaHomeForYetusGeneralCheck(String branchName) { + // for 2.x, build with java 11, for 3.x, build with java 17 + if (branchName.indexOf("branch-2") >=0) { + return "/usr/lib/jvm/java-11"; + } else { + return "/usr/lib/jvm/java-17" + } +} diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index c7131c45e2f1..25eee1463c10 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -178,7 +178,7 @@ function personality_modules # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3 # logic is not both activated within Maven. - if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" = branch-2* ]] ; then + if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" == *"branch-2"* ]] ; then extra="${extra} -Dhadoop.profile=${HADOOP_PROFILE}" fi @@ -490,7 +490,7 @@ function shadedjars_rebuild # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3 # logic is not both activated within Maven. - if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" = branch-2* ]] ; then + if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" = *"branch-2"* ]] ; then maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}") fi @@ -580,14 +580,7 @@ function hadoopcheck_rebuild # All supported Hadoop versions that we want to test the compilation with # See the Hadoop section on prereqs in the HBase Reference Guide - if [[ "${PATCH_BRANCH}" = branch-2.4 ]]; then - yetus_info "Setting Hadoop 2 versions to test based on branch-2.4 rules." - if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.10.2" - else - hbase_hadoop2_versions="2.10.0 2.10.1 2.10.2" - fi - elif [[ "${PATCH_BRANCH}" = branch-2* ]]; then + if [[ "${PATCH_BRANCH}" = *"branch-2"* ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-2.5+ rules." 
hbase_hadoop2_versions="2.10.2" else @@ -595,14 +588,7 @@ function hadoopcheck_rebuild hbase_hadoop2_versions="" fi - if [[ "${PATCH_BRANCH}" = branch-2.4 ]]; then - yetus_info "Setting Hadoop 3 versions to test based on branch-2.4 rules" - if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.4 3.2.4 3.3.6" - else - hbase_hadoop3_versions="3.1.1 3.1.2 3.1.3 3.1.4 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.3.0 3.3.1 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" - fi - elif [[ "${PATCH_BRANCH}" = branch-2.5 ]]; then + if [[ "${PATCH_BRANCH}" = *"branch-2.5"* ]]; then yetus_info "Setting Hadoop 3 versions to test based on branch-2.5 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop3_versions="3.2.4 3.3.6" @@ -642,7 +628,7 @@ function hadoopcheck_rebuild done hadoop_profile="" - if [[ "${PATCH_BRANCH}" = branch-2* ]]; then + if [[ "${PATCH_BRANCH}" == *"branch-2"* ]]; then hadoop_profile="-Dhadoop.profile=3.0" fi for hadoopver in ${hbase_hadoop3_versions}; do From c722dde59fcfc8b5e176e8e92923d759bd761163 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 29 Jun 2024 21:36:57 +0800 Subject: [PATCH 437/514] HBASE-28694 Make client integration and packaging test work with java 17 (#6035) Signed-off-by: Xin Sun --- dev-support/Jenkinsfile | 50 ++++++++++++------- .../hbase_nightly_pseudo-distributed-test.sh | 4 +- dev-support/hbase_nightly_source-artifact.sh | 30 +++++++---- 3 files changed, 54 insertions(+), 30 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 0a47726e53c2..946525606145 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -758,11 +758,6 @@ pipeline { label 'hbase-large' } } - tools { - maven 'maven_latest' - // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "jdk_1.8_latest" - } environment { BASEDIR = "${env.WORKSPACE}/component" BRANCH = "${env.BRANCH_NAME}" @@ -797,21 +792,25 @@ pipeline { echo "got the following saved stats in 'output-srctarball/machine'" ls -lh "output-srctarball/machine" ''' - sh """#!/bin/bash -e + sh '''#!/bin/bash -e echo "Checking the steps for an RM to make a source artifact, then a binary artifact." - if "${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh" \ + docker build -t hbase-integration-test -f "${BASEDIR}/dev-support/docker/Dockerfile" . + docker run --rm -v "${WORKSPACE}":/hbase -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \ + -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" --workdir=/hbase hbase-integration-test \ + "component/dev-support/hbase_nightly_source-artifact.sh" \ --intermediate-file-dir output-srctarball \ --unpack-temp-dir unpacked_src_tarball \ --maven-m2-initial .m2-for-repo \ --maven-m2-src-build .m2-for-src \ --clean-source-checkout \ - "${env.BASEDIR}" ; then + component + if [ $? -eq 0 ]; then echo '(/) {color:green}+1 source release artifact{color}\n-- See build output for details.' >output-srctarball/commentfile else echo '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.' 
>output-srctarball/commentfile exit 1 fi - """ + ''' echo "unpacking the hbase bin tarball into 'hbase-install' and the client tarball into 'hbase-client'" sh '''#!/bin/bash -e if [ 2 -ne $(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | grep -v hadoop3 | wc -l) ]; then @@ -834,21 +833,25 @@ pipeline { ''' unstash 'hadoop-2' sh '''#!/bin/bash -xe - if [[ "${BRANCH}" = branch-2* ]]; then + if [[ "${BRANCH}" == *"branch-2"* ]]; then echo "Attempting to use run an instance on top of Hadoop 2." artifact=$(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | head -n 1) tar --strip-components=1 -xzf "${artifact}" -C "hadoop-2" - if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \ + docker build -t hbase-integration-test -f "${BASEDIR}/dev-support/docker/Dockerfile" . + docker run --rm -v "${WORKSPACE}":/hbase -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \ + -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-8" --workdir=/hbase hbase-integration-test \ + component/dev-support/hbase_nightly_pseudo-distributed-test.sh \ --single-process \ --working-dir output-integration/hadoop-2 \ --hbase-client-install "hbase-client" \ - "hbase-install" \ - "hadoop-2/bin/hadoop" \ + hbase-install \ + hadoop-2/bin/hadoop \ hadoop-2/share/hadoop/yarn/timelineservice \ hadoop-2/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \ hadoop-2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \ hadoop-2/bin/mapred \ - >output-integration/hadoop-2.log 2>&1 ; then + >output-integration/hadoop-2.log 2>&1 + if [ $? -ne 0 ]; then echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 2. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-2.log]. (note that this means we didn't run on Hadoop 3)" >output-integration/commentfile exit 2 fi @@ -870,7 +873,12 @@ pipeline { hbase_install_dir="hbase-hadoop3-install" hbase_client_dir="hbase-hadoop3-client" fi - if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \ + docker build -t hbase-integration-test -f "${BASEDIR}/dev-support/docker/Dockerfile" . + docker run --rm -v "${WORKSPACE}":/hbase -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \ + -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \ + -e HADOOP_OPTS="--add-opens java.base/java.lang=ALL-UNNAMED" \ + --workdir=/hbase hbase-integration-test \ + component/dev-support/hbase_nightly_pseudo-distributed-test.sh \ --single-process \ --working-dir output-integration/hadoop-3 \ --hbase-client-install ${hbase_client_dir} \ @@ -880,12 +888,17 @@ pipeline { hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \ hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \ hadoop-3/bin/mapred \ - >output-integration/hadoop-3.log 2>&1 ; then + >output-integration/hadoop-3.log 2>&1 + if [ $? -ne 0 ]; then echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3.log]. (note that this means we didn't check the Hadoop 3 shaded client)" >output-integration/commentfile exit 2 fi echo "Attempting to use run an instance on top of Hadoop 3, relying on the Hadoop client artifacts for the example client program." - if ! 
"${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \ + docker run --rm -v "${WORKSPACE}":/hbase -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \ + -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \ + -e HADOOP_OPTS="--add-opens java.base/java.lang=ALL-UNNAMED" \ + --workdir=/hbase hbase-integration-test \ + component/dev-support/hbase_nightly_pseudo-distributed-test.sh \ --single-process \ --hadoop-client-classpath hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar \ --working-dir output-integration/hadoop-3-shaded \ @@ -896,7 +909,8 @@ pipeline { hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \ hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \ hadoop-3/bin/mapred \ - >output-integration/hadoop-3-shaded.log 2>&1 ; then + >output-integration/hadoop-3-shaded.log 2>&1 + if [ $? -ne 0 ]; then echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3 using Hadoop's shaded client. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3-shaded.log]." >output-integration/commentfile exit 2 fi diff --git a/dev-support/hbase_nightly_pseudo-distributed-test.sh b/dev-support/hbase_nightly_pseudo-distributed-test.sh index 8290f06110f3..56fec01aef7d 100755 --- a/dev-support/hbase_nightly_pseudo-distributed-test.sh +++ b/dev-support/hbase_nightly_pseudo-distributed-test.sh @@ -513,11 +513,11 @@ public class HBaseClientReadWriteExample { } EOF redirect_and_run "${working_dir}/hbase-shaded-client-compile" \ - javac -cp "${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hadoop_jars}" "${working_dir}/HBaseClientReadWriteExample.java" + $JAVA_HOME/bin/javac -cp "${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hadoop_jars}" "${working_dir}/HBaseClientReadWriteExample.java" echo "Running shaded client example. It'll fetch the set of regions, round-trip them to a file in HDFS, then write them one-per-row into the test table." # The order of classpath entries here is important. if we're using non-shaded Hadoop 3 / 2.9.0 jars, we have to work around YARN-2190. redirect_and_run "${working_dir}/hbase-shaded-client-example" \ - java -cp "${working_dir}/hbase-conf/:${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hbase_dep_classpath}:${working_dir}:${hadoop_jars}" HBaseClientReadWriteExample + $JAVA_HOME/bin/java -cp "${working_dir}/hbase-conf/:${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hbase_dep_classpath}:${working_dir}:${hadoop_jars}" HBaseClientReadWriteExample echo "Checking on results of example program." "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -copyToLocal "example-region-listing.data" "${working_dir}/example-region-listing.data" diff --git a/dev-support/hbase_nightly_source-artifact.sh b/dev-support/hbase_nightly_source-artifact.sh index 79c62ca77434..59667408cfa8 100755 --- a/dev-support/hbase_nightly_source-artifact.sh +++ b/dev-support/hbase_nightly_source-artifact.sh @@ -33,6 +33,11 @@ function usage { echo " a git checkout, including ignored files." exit 1 } + +MVN="mvn" +if ! 
command -v mvn &>/dev/null; then + MVN=$MAVEN_HOME/bin/mvn +fi # if no args specified, show usage if [ $# -lt 1 ]; then usage @@ -124,7 +129,7 @@ fi # See http://hbase.apache.org/book.html#maven.release echo "Maven details, in case our JDK doesn't match expectations:" -mvn --version --offline | tee "${working_dir}/maven_version" +${MVN} --version --offline | tee "${working_dir}/maven_version" echo "Do a clean building of the source artifact using code in ${component_dir}" cd "${component_dir}" @@ -183,16 +188,16 @@ function build_tarball { local build_log="srctarball_install.log" local tarball_glob="hbase-*-bin.tar.gz" if [ $build_hadoop3 -ne 0 ]; then - local version=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) + local version=$(${MVN} -Dmaven.repo.local="${m2_tarbuild}" help:evaluate -Dexpression=project.version -q -DforceStdout) local hadoop3_version=$(get_hadoop3_version $version) mvn_extra_args="-Drevision=${hadoop3_version} -Dhadoop.profile=3.0" build_log="hadoop3_srctarball_install.log" tarball_glob="hbase-*-hadoop3-*-bin.tar.gz" echo "Follow the ref guide section on making a RC: Step 8 Build the hadoop3 binary tarball." else - echo "Follow the ref guide section on making a RC: Step 8 Build the binary tarball." + echo "Follow the ref guide section on making a RC: Step 7 Build the binary tarball." fi - if mvn --threads=2 -DskipTests -Prelease --batch-mode -Dmaven.repo.local="${m2_tarbuild}" ${mvn_extra_args} clean install \ + if ${MVN} --threads=2 -DskipTests -Prelease --batch-mode -Dmaven.repo.local="${m2_tarbuild}" ${mvn_extra_args} clean install \ assembly:single >"${working_dir}/${build_log}" 2>&1; then for artifact in "${unpack_dir}"/hbase-assembly/target/${tarball_glob}; do if [ -f "${artifact}" ]; then @@ -212,20 +217,25 @@ function build_tarball { cd "${unpack_dir}" -build_tarball 0 +${MVN} -Dmaven.repo.local="${m2_tarbuild}" help:active-profiles | grep -q hadoop-3.0 if [ $? -ne 0 ]; then - exit 1 -fi + echo "The hadoop-3.0 profile is not activated by default, build a default tarball first." + # use java 8 to build with hadoop2 + JAVA_HOME="/usr/lib/jvm/java-8" build_tarball 0 + if [ $? -ne 0 ]; then + exit 1 + fi -mvn help:active-profiles | grep -q hadoop-3.0 -if [ $? -ne 0 ]; then - echo "The hadoop-3.0 profile is not activated by default, build a hadoop3 tarball." # move the previous tarballs out, so it will not be cleaned while building against hadoop3 mv "${unpack_dir}"/hbase-assembly/target/hbase-*-bin.tar.gz "${unpack_dir}"/ + echo "build a hadoop3 tarball." build_tarball 1 if [ $? -ne 0 ]; then exit 1 fi # move tarballs back mv "${unpack_dir}"/hbase-*-bin.tar.gz "${unpack_dir}"/hbase-assembly/target/ +else + echo "The hadoop-3.0 profile is activated by default, build a default tarball." 
+ build_tarball 0 fi From e6395a0dea10c1773b32d61afab4a54c64e05d2b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 29 Jun 2024 21:37:27 +0800 Subject: [PATCH 438/514] HBASE-28699 Bump jdk and maven versions in pre commit and nighly dockerfile (#6034) Signed-off-by: Xin Sun --- dev-support/docker/Dockerfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 06e4b67526f8..26b2c35b3462 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -91,30 +91,30 @@ RUN curl --location --fail --silent --show-error --output /tmp/hadolint "${HADOL echo "${HADOLINT_SHA512} */tmp/hadolint" | sha512sum -c - FROM base_image AS maven_download_image -ENV MAVEN_VERSION='3.8.6' +ENV MAVEN_VERSION='3.9.8' ENV MAVEN_URL="https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz" -ENV MAVEN_SHA512='f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26' +ENV MAVEN_SHA512='7d171def9b85846bf757a2cec94b7529371068a0670df14682447224e57983528e97a6d1b850327e4ca02b139abaab7fcb93c4315119e6f0ffb3f0cbc0d0b9a2' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz "${MAVEN_URL}" && \ echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c - FROM base_image AS openjdk8_download_image -ENV OPENJDK8_URL='https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz' -ENV OPENJDK8_SHA256='1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee' +ENV OPENJDK8_URL='https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u412-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u412b08.tar.gz' +ENV OPENJDK8_SHA256='b9884a96f78543276a6399c3eb8c2fd8a80e6b432ea50e87d3d12d495d1d2808' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \ echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c - FROM base_image AS openjdk11_download_image -ENV OPENJDK11_URL='https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.17%2B8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.17_8.tar.gz' -ENV OPENJDK11_SHA256='b8d46ed08ef4859476fe6421a7690d899ed83dce63f13fd894f994043177ef3c' +ENV OPENJDK11_URL='https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.23%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.23_9.tar.gz' +ENV OPENJDK11_SHA256='23e47ea7a3015be3240f21185fd902adebdcf76530757c9b482c7eb5bd3417c2' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error --output /tmp/adoptopenjdk11.tar.gz "${OPENJDK11_URL}" && \ echo "${OPENJDK11_SHA256} */tmp/adoptopenjdk11.tar.gz" | sha256sum -c - FROM base_image AS openjdk17_download_image -ENV OPENJDK17_URL='https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.10%2B7/OpenJDK17U-jdk_x64_linux_hotspot_17.0.10_7.tar.gz' -ENV OPENJDK17_SHA256='a8fd07e1e97352e97e330beb20f1c6b351ba064ca7878e974c7d68b8a5c1b378' +ENV OPENJDK17_URL='https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.11%2B9/OpenJDK17U-jdk_x64_linux_hotspot_17.0.11_9.tar.gz' +ENV OPENJDK17_SHA256='aa7fb6bb342319d227a838af5c363bfa1b4a670c209372f9e6585bd79da6220c' SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl --location --fail --silent --show-error 
--output /tmp/adoptopenjdk17.tar.gz "${OPENJDK17_URL}" && \ echo "${OPENJDK17_SHA256} */tmp/adoptopenjdk17.tar.gz" | sha256sum -c - From 27d32b797a4fa698cc4d8764d8a8a5a38cac3f24 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 29 Jun 2024 22:03:35 +0800 Subject: [PATCH 439/514] Revert "HASE-28414 create-release should spotless:apply after making any file changes (#5824)" This reverts commit 8b5ccda02f80037f32a43b5423d2e82edfeaa409. Bad jira issue number --- dev-support/create-release/release-build.sh | 6 ++---- dev-support/create-release/release-util.sh | 11 ----------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index 6cc855c97259..f3d8798be462 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -146,8 +146,7 @@ if [[ "$1" == "tag" ]]; then # Create release version maven_set_version "$RELEASE_VERSION" - maven_spotless_apply - git_add_poms + find . -name pom.xml -exec git add {} \; # Always put CHANGES.md and RELEASENOTES.md to parent directory, so later we do not need to # check their position when generating release data. We can not put them under the source code # directory because for 3.x+, CHANGES.md and RELEASENOTES.md are not tracked so later when @@ -169,8 +168,7 @@ if [[ "$1" == "tag" ]]; then # Create next version maven_set_version "$NEXT_VERSION" - maven_spotless_apply - git_add_poms + find . -name pom.xml -exec git add {} \; git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index a33319fd3614..3a1b38644f85 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -871,14 +871,3 @@ function get_hadoop3_version() { echo "${version}-hadoop3" fi } - -# Run mvn spotless:apply to format the code base -# For 2.x, the generated CHANGES.md and RELEASENOTES.md may have lines end with whitespace and -# case spotless:check failure, so we should run spotless:apply before committing -function maven_spotless_apply() { - "${MVN[@]}" spotless:apply -} - -function git_add_poms() { - find . -name pom.xml -exec git add {} \; -} From 22e774e75ca27a9681fcd510f6c591a5fa4280dc Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 21 Apr 2024 19:40:07 +0800 Subject: [PATCH 440/514] HBASE-28414 create-release should spotless:apply after making any file changes (#5824) Signed-off-by: Bryan Beaudreault (cherry picked from commit 8b5ccda02f80037f32a43b5423d2e82edfeaa409) --- dev-support/create-release/release-build.sh | 6 ++++-- dev-support/create-release/release-util.sh | 11 +++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index f3d8798be462..6cc855c97259 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -146,7 +146,8 @@ if [[ "$1" == "tag" ]]; then # Create release version maven_set_version "$RELEASE_VERSION" - find . -name pom.xml -exec git add {} \; + maven_spotless_apply + git_add_poms # Always put CHANGES.md and RELEASENOTES.md to parent directory, so later we do not need to # check their position when generating release data. 
We can not put them under the source code # directory because for 3.x+, CHANGES.md and RELEASENOTES.md are not tracked so later when @@ -168,7 +169,8 @@ if [[ "$1" == "tag" ]]; then # Create next version maven_set_version "$NEXT_VERSION" - find . -name pom.xml -exec git add {} \; + maven_spotless_apply + git_add_poms git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 3a1b38644f85..a33319fd3614 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -871,3 +871,14 @@ function get_hadoop3_version() { echo "${version}-hadoop3" fi } + +# Run mvn spotless:apply to format the code base +# For 2.x, the generated CHANGES.md and RELEASENOTES.md may have lines end with whitespace and +# case spotless:check failure, so we should run spotless:apply before committing +function maven_spotless_apply() { + "${MVN[@]}" spotless:apply +} + +function git_add_poms() { + find . -name pom.xml -exec git add {} \; +} From 0a1f1c4be4563897a6294bb0b05d82bf47617c5d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 3 Jul 2024 11:04:42 +0800 Subject: [PATCH 441/514] HBASE-28675 Change releaseTarget to 17 and also remove unused profile for older jdk versions in pom (#6037) Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/client/AsyncAdmin.java | 1 + hbase-common/pom.xml | 5 ++ .../hadoop/hbase/io/compress/Compression.java | 1 + .../io/encoding/TestEncodedDataBlock.java | 42 +++++++------ .../regionserver/MetricsTableWrapperStub.java | 1 + .../hbase/io/hfile/bucket/BucketCache.java | 52 ++-------------- .../regionserver/ServerNonceManager.java | 10 +-- .../throttle/StoreHotnessProtector.java | 9 +-- pom.xml | 61 +++++-------------- 9 files changed, 61 insertions(+), 121 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index bdb0228d9687..331aa4a254af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1393,6 +1393,7 @@ default CompletableFuture balance() { * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} * instead. 
*/ + @Deprecated default CompletableFuture balance(boolean forcible) { return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build()) .thenApply(BalanceResponse::isBalancerRan); diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index dd30a7a6f581..ec43ddea525f 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -131,6 +131,11 @@ mockito-core test + + org.mockito + mockito-inline + test + org.slf4j jcl-over-slf4j diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java index c55cdaa1f302..7f73cd2f004e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java @@ -554,6 +554,7 @@ private static CompressionCodec buildCodec(final Configuration conf, final Algor throw new RuntimeException("No codec configured for " + algo.confKey); } Class codecClass = getClassLoaderForCodec().loadClass(codecClassName); + // The class is from hadoop so we use hadoop's ReflectionUtils to create it CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, new Configuration(conf)); LOG.info("Loaded codec {} for compression algorithm {}", codec.getClass().getCanonicalName(), diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedDataBlock.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedDataBlock.java index 55fad74295e6..b9319ffacbca 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedDataBlock.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedDataBlock.java @@ -17,20 +17,27 @@ */ package org.apache.hadoop.hbase.io.encoding; -import java.io.IOException; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Before; +import org.apache.hadoop.util.ReflectionUtils; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mockito.Mockito; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.MockitoJUnitRunner; /** - * Test for EncodedDataBlock + * Test for HBASE-23342 */ +@RunWith(MockitoJUnitRunner.class) @Category({ MiscTests.class, SmallTests.class }) public class TestEncodedDataBlock { @@ -38,26 +45,23 @@ public class TestEncodedDataBlock { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestEncodedDataBlock.class); - private Algorithm algo; + // for generating exception + @Mock + private MockedStatic mockedReflectionUtils; + private static final byte[] INPUT_BYTES = new byte[] { 0, 1, 0, 0, 1, 2, 3, 0, 0, 1, 0, 0, 1, 2, 3, 0, 0, 1, 0, 0, 1, 2, 3, 0, 0, 1, 0, 0, 1, 2, 3, 0 }; - @Before - public void setUp() throws IOException { - algo = Mockito.mock(Algorithm.class); - } - + @SuppressWarnings("unchecked") @Test public void testGetCompressedSize() throws Exception { - Mockito.when(algo.createCompressionStream(Mockito.any(), Mockito.any(), Mockito.anyInt())) - .thenThrow(IOException.class); 
- try { - EncodedDataBlock.getCompressedSize(algo, null, INPUT_BYTES, 0, 0); - throw new RuntimeException("Should not reach here"); - } catch (IOException e) { - Mockito.verify(algo, Mockito.times(1)).createCompressionStream(Mockito.any(), Mockito.any(), - Mockito.anyInt()); - } + RuntimeException inject = new RuntimeException("inject error"); + mockedReflectionUtils.when(() -> ReflectionUtils.newInstance(any(Class.class), any())) + .thenThrow(inject); + RuntimeException error = assertThrows(RuntimeException.class, + () -> EncodedDataBlock.getCompressedSize(Algorithm.GZ, null, INPUT_BYTES, 0, 0)); + // make sure we get the injected error instead of NPE + assertSame(inject, error); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 4ecbadc6e936..702096f852b5 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -117,6 +117,7 @@ public long getCpRequestsCount(String table) { return 99; } + @Override public long getStaticIndexSize(String table) { return 101; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 7ee7a03ba647..8ee0b6b98ada 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -565,10 +565,10 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach } /** - * If the passed cache key relates to a reference (.), this method looks - * for the block from the referred file, in the cache. If present in the cache, the block for the - * referred file is returned, otherwise, this method returns null. It will also return null if the - * passed cache key doesn't relate to a reference. + * If the passed cache key relates to a reference (<hfile>.<parentEncRegion>), this + * method looks for the block from the referred file, in the cache. If present in the cache, the + * block for the referred file is returned, otherwise, this method returns null. It will also + * return null if the passed cache key doesn't relate to a reference. * @param key the BlockCacheKey instance to look for in the cache. * @return the cached block from the referred file, null if there's no such block in the cache or * the passed key doesn't relate to a reference. @@ -1441,50 +1441,6 @@ private void dumpPrefetchList() { } } - /** - * Create an input stream that deletes the file after reading it. Use in try-with-resources to - * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions: - * - *
-   * <pre>
    -   *   File f = ...
    -   *   try (FileInputStream fis = new FileInputStream(f)) {
    -   *     // use the input stream
    -   *   } finally {
    -   *     if (!f.delete()) throw new IOException("failed to delete");
    -   *   }
-   * </pre>
    - * - * @param file the file to read and delete - * @return a FileInputStream for the given file - * @throws IOException if there is a problem creating the stream - */ - private FileInputStream deleteFileOnClose(final File file) throws IOException { - return new FileInputStream(file) { - private File myFile; - - private FileInputStream init(File file) { - myFile = file; - return this; - } - - @Override - public void close() throws IOException { - // close() will be called during try-with-resources and it will be - // called by finalizer thread during GC. To avoid double-free resource, - // set myFile to null after the first call. - if (myFile == null) { - return; - } - - super.close(); - if (!myFile.delete()) { - throw new IOException("Failed deleting persistence file " + myFile.getAbsolutePath()); - } - myFile = null; - } - }.init(file); - } - private void verifyCapacityAndClasses(long capacitySize, String ioclass, String mapclass) throws IOException { if (capacitySize != cacheCapacity) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index 72e7a6bdd81f..a3ba4ff9db5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -17,8 +17,9 @@ */ package org.apache.hadoop.hbase.regionserver; -import java.text.SimpleDateFormat; -import java.util.Date; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.conf.Configuration; @@ -46,7 +47,8 @@ public class ServerNonceManager { */ private int conflictWaitIterationMs = 30000; - private static final SimpleDateFormat tsFormat = new SimpleDateFormat("HH:mm:ss.SSS"); + private static final DateTimeFormatter TS_FORMAT = + DateTimeFormatter.ofPattern("HH:mm:ss.SSS").withZone(ZoneId.systemDefault()); // This object is used to synchronize on in case of collisions, and for cleanup. 
private static class OperationContext { @@ -65,7 +67,7 @@ private static class OperationContext { @Override public String toString() { return "[state " + getState() + ", hasWait " + hasWait() + ", activity " - + tsFormat.format(new Date(getActivityTime())) + "]"; + + TS_FORMAT.format(Instant.ofEpochMilli(getActivityTime())) + "]"; } public OperationContext() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index 70683cb45722..6a6f52ebf294 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -128,7 +128,7 @@ public void start(Map> familyMaps) throws RegionTooBusyExcept return; } - String tooBusyStore = null; + StringBuilder tooBusyStore = new StringBuilder(); boolean aboveParallelThreadLimit = false; boolean aboveParallelPrePutLimit = false; @@ -148,9 +148,10 @@ public void start(Map> familyMaps) throws RegionTooBusyExcept store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit; boolean storeAbovePrePut = preparePutCount > this.parallelPreparePutToStoreThreadLimit; if (storeAboveThread || storeAbovePrePut) { - tooBusyStore = (tooBusyStore == null - ? store.getColumnFamilyName() - : tooBusyStore + "," + store.getColumnFamilyName()); + if (tooBusyStore.length() > 0) { + tooBusyStore.append(','); + } + tooBusyStore.append(store.getColumnFamilyName()); } aboveParallelThreadLimit |= storeAboveThread; aboveParallelPrePutLimit |= storeAbovePrePut; diff --git a/pom.xml b/pom.xml index f71bfbf96915..1da1648e1355 100644 --- a/pom.xml +++ b/pom.xml @@ -810,7 +810,7 @@ yyyy-MM-dd'T'HH:mm ${maven.build.timestamp} 17 - 8 + 17 3.5.0 @@ -834,12 +834,8 @@ 3.10.6.Final 4.1.108.Final - 0.13.0 - - 0.13.0 + 0.15.0 + 0.15.0 1.11.3 2.8.1 1.15 @@ -993,9 +989,13 @@ "-Djava.library.path=${hadoop.library.path};${java.library.path}" -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.opentelemetry.context.enableStrictContext=true - - -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true + + -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED @@ -1008,10 +1008,8 @@ --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED - --add-exports java.base/sun.net.util=ALL-UNNAMED - - --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED + --add-exports java.base/sun.net.util=ALL-UNNAMED + --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED --add-opens java.base/sun.security.x509=ALL-UNNAMED --add-opens java.base/sun.security.util=ALL-UNNAMED @@ -3304,24 +3302,13 @@ --> - build-with-jdk8 - - 1.8 - - - ${compileSource} - ${compileSource} - - - - build-with-jdk11 + build-with-jdk17 - [11,) + [17,) ${releaseTarget} - - ${hbase-surefire.jdk11.flags} + ${hbase-surefire.jdk17.flags} ${hbase-surefire.argLine} @{jacocoArgLine} 2200m - - 0.14.1 @@ -3361,18 +3342,6 @@ - - build-with-jdk17 - - [17,) - - - ${hbase-surefire.jdk11.flags} - ${hbase-surefire.jdk17.flags} - ${hbase-surefire.argLine} - @{jacocoArgLine} - - jenkins.patch From 3d4bf2edbbdd6206555df7245b6978fe331f541b Mon Sep 17 00:00:00 2001 From: 
Duo Zhang Date: Thu, 4 Jul 2024 15:28:49 +0800 Subject: [PATCH 442/514] HBASE-28700 Change hbase site build to use jdk 17 (#6036) Signed-off-by: Xin Sun --- dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile index 7e8ec44a4e6a..c5531296a5fb 100644 --- a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile +++ b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile @@ -39,7 +39,7 @@ pipeline { tools { maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "jdk_1.8_latest" + jdk "jdk_17_latest" } steps { dir('hbase') { From 4821521b813a5c42d5b6b361a806e9ba3c5a7a84 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Fri, 5 Jul 2024 15:21:02 -0700 Subject: [PATCH 443/514] HBASE-28703 Data race in RecoveredEditsOutputSink while closing writers (#6042) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/wal/OutputSink.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java index c3df5e4ee6a5..7a2d7fba6d70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CompletionService; @@ -58,9 +59,12 @@ abstract class OutputSink { protected final AtomicLong totalSkippedEdits = new AtomicLong(); /** - * List of all the files produced by this sink + * List of all the files produced by this sink, + *
+   * <p/>
    + * Must be a synchronized list to avoid concurrency issues. CopyOnWriteArrayList is not a good + * choice because all we do is add to the list and then return the result. */ - protected final List splits = new ArrayList<>(); + protected final List splits = Collections.synchronizedList(new ArrayList<>()); protected MonitoredTask status = null; From 305f2ce28e15dba5f5c57b4ae253ab43b33c7781 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Jul 2024 11:17:25 +0800 Subject: [PATCH 444/514] HBASE-28709 Bump certifi in /dev-support/git-jira-release-audit (#6048) Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.7.22 to 2024.7.4. - [Commits](https://github.com/certifi/python-certifi/compare/2023.07.22...2024.07.04) --- updated-dependencies: - dependency-name: certifi dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index e2a04baca778..cd3c205a8fff 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -16,7 +16,7 @@ # limitations under the License. # blessed==1.17.0 -certifi==2023.7.22 +certifi==2024.7.4 cffi==1.13.2 chardet==3.0.4 cryptography==42.0.4 From 68f3a2674cb5e25c1ce33e4cc65797251bf35c07 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 8 Jul 2024 10:56:12 +0800 Subject: [PATCH 445/514] HBASE-28477 Update ref guide about the EOL for branch-2.4 (#6049) Signed-off-by: Yi Mei --- src/main/asciidoc/_chapters/community.adoc | 4 +- .../asciidoc/_chapters/configuration.adoc | 51 ++++++++++--------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc index d62fb22dcacf..5dfc42a77066 100644 --- a/src/main/asciidoc/_chapters/community.adoc +++ b/src/main/asciidoc/_chapters/community.adoc @@ -165,8 +165,8 @@ If this list goes out of date or you can't reach the listed person, reach out to | 2.4 | Andrew Purtell -| Check the https://hbase.apache.org/downloads.html[download] page -| *NOT YET* +| 2.4.18 +| June 2024 | 2.5 | Andrew Purtell diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 47481ab5c559..9a95eb22a481 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -294,16 +294,19 @@ use Apache Hadoop, or a vendor's distribution of Hadoop. No distinction is made link:https://cwiki.apache.org/confluence/display/HADOOP2/Distributions+and+Commercial+Support[the Hadoop wiki] for information about vendors of Hadoop. -.Hadoop 2.x is recommended. +.Hadoop 3.x is recommended. [TIP] ==== -Hadoop 2.x is faster and includes features, such as short-circuit reads (see -<>), which will help improve your HBase random read profile. Hadoop -2.x also includes important bug fixes that will improve your overall HBase experience. HBase does -not support running with earlier versions of Hadoop. See the table below for requirements specific -to different HBase versions. 
+Comparing to Hadoop 1.x, Hadoop 2.x is faster and includes features, such as short-circuit reads +(see <>), which will help improve your HBase random read profile. +Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase +does not support running with earlier versions of Hadoop. See the table below for requirements +specific to different HBase versions. + +Today, Hadoop 3.x is recommended as the last Hadoop 2.x release 2.10.2 was released years ago, and +there is no release for Hadoop 2.x for a very long time, although the Hadoop community does not +officially EOL Hadoop 2.x yet. -Hadoop 3.x is still in early access releases and has not yet been sufficiently tested by the HBase community for production use cases. ==== Use the following legend to interpret these tables: @@ -315,29 +318,29 @@ link:https://hadoop.apache.org/cve_list.html[CVEs] so we drop the support in new .Hadoop version support matrix for active release lines -[cols="1,2*^.^", options="header"] +[cols="1,1*^.^", options="header"] |=== -| | HBase-2.4.x | HBase-2.5.x -|Hadoop-2.10.[0-1] | icon:check-circle[role="green"] | icon:times-circle[role="red"] -|Hadoop-2.10.2+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] -|Hadoop-3.1.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-3.1.1+ | icon:check-circle[role="green"] | icon:times-circle[role="red"] -|Hadoop-3.2.[0-2] | icon:check-circle[role="green"] | icon:times-circle[role="red"] -|Hadoop-3.2.3+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] -|Hadoop-3.3.[0-1] | icon:check-circle[role="green"] | icon:times-circle[role="red"] -|Hadoop-3.3.2+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] +| | HBase-2.5.x +|Hadoop-2.10.[0-1] | icon:times-circle[role="red"] +|Hadoop-2.10.2+ | icon:check-circle[role="green"] +|Hadoop-3.1.0 | icon:times-circle[role="red"] +|Hadoop-3.1.1+ | icon:times-circle[role="red"] +|Hadoop-3.2.[0-2] | icon:times-circle[role="red"] +|Hadoop-3.2.3+ | icon:check-circle[role="green"] +|Hadoop-3.3.[0-1] | icon:times-circle[role="red"] +|Hadoop-3.3.2+ | icon:check-circle[role="green"] |=== .Hadoop version support matrix for EOM 2.3+ release lines -[cols="1,1*^.^", options="header"] +[cols="1,2*^.^", options="header"] |=== -| | HBase-2.3.x -|Hadoop-2.10.x | icon:check-circle[role="green"] -|Hadoop-3.1.0 | icon:times-circle[role="red"] -|Hadoop-3.1.1+ | icon:check-circle[role="green"] -|Hadoop-3.2.x | icon:check-circle[role="green"] -|Hadoop-3.3.x | icon:check-circle[role="green"] +| | HBase-2.3.x | HBase-2.4.x +|Hadoop-2.10.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] +|Hadoop-3.1.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.1.1+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] +|Hadoop-3.2.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] +|Hadoop-3.3.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] |=== .Hadoop version support matrix for EOM 2.x release lines From f1d327866dd02cc6168c210014e7f114a738030a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 8 Jul 2024 10:56:31 +0800 Subject: [PATCH 446/514] HBASE-28710 Remove download links for 2.4.x on download page (#6050) Signed-off-by: Yi Mei --- src/site/xdoc/downloads.xml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 88cb7ac8d7f0..1a3e08ebe184 100644 --- 
a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -118,29 +118,6 @@ under the License.
-      stable release
-      2.4.18
-      2024/05/25
-      2.4.18 vs 2.4.17
-      Changes
-      Release Notes
-      src (sha512 asc)
-      bin (sha512 asc)
-      client-bin (sha512 asc)
    From 5c3c8af4ff02dc029d4a27d7a6ca706f6e983610 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 8 Jul 2024 15:10:55 +0800 Subject: [PATCH 447/514] HBASE-28698 Add hadoop 3.4.0 in hadoop check (#6052) Signed-off-by: Pankaj Kumar --- dev-support/hbase-personality.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 25eee1463c10..a6a3d95e937a 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -591,16 +591,16 @@ function hadoopcheck_rebuild if [[ "${PATCH_BRANCH}" = *"branch-2.5"* ]]; then yetus_info "Setting Hadoop 3 versions to test based on branch-2.5 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.2.4 3.3.6" + hbase_hadoop3_versions="3.2.4 3.3.6 3.4.0" else - hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6" + hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6 3.4.0" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.6+/master/feature branch rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.3.6" + hbase_hadoop3_versions="3.3.6 3.4.0" else - hbase_hadoop3_versions="3.3.5 3.3.6" + hbase_hadoop3_versions="3.3.5 3.3.6 3.4.0" fi fi From 610af4855f475b2bfa3d526bf78873677ddcff84 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Mon, 8 Jul 2024 13:59:49 +0200 Subject: [PATCH 448/514] HBASE-28685 Support non-root context in REST RemoteHTable and RemodeAdmin (#6013) Signed-off-by: Duo Zhang --- .../hadoop/hbase/rest/client/RemoteAdmin.java | 23 +++++++++++----- .../hbase/rest/client/RemoteHTable.java | 26 ++++++++++++------- 2 files changed, 32 insertions(+), 17 deletions(-) diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java index bccc97deca8a..cddbf1c2c46f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -45,6 +45,7 @@ public class RemoteAdmin { final String accessToken; final int maxRetries; final long sleepTime; + private String pathPrefix = "/"; // This unmarshaller is necessary for getting the /version/cluster resource. // This resource does not support protobufs. 
Therefore this is necessary to @@ -79,6 +80,14 @@ public RemoteAdmin(Client client, Configuration conf, String accessToken) { this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000); } + /** + * Constructor + */ + public RemoteAdmin(Client client, Configuration conf, String accessToken, String pathPrefix) { + this(client, conf, accessToken); + this.pathPrefix = pathPrefix + "/"; + } + /** * @param tableName name of table to check * @return true if all regions of the table are available @@ -95,7 +104,7 @@ public boolean isTableAvailable(String tableName) throws IOException { public VersionModel getRestVersion() throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -136,7 +145,7 @@ public VersionModel getRestVersion() throws IOException { public StorageClusterStatusModel getClusterStatus() throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -175,7 +184,7 @@ public StorageClusterStatusModel getClusterStatus() throws IOException { public StorageClusterVersionModel getClusterVersion() throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -221,7 +230,7 @@ public StorageClusterVersionModel getClusterVersion() throws IOException { */ public boolean isTableAvailable(byte[] tableName) throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -260,7 +269,7 @@ public boolean isTableAvailable(byte[] tableName) throws IOException { public void createTable(TableDescriptor desc) throws IOException { TableSchemaModel model = new TableSchemaModel(desc); StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -306,7 +315,7 @@ public void deleteTable(final String tableName) throws IOException { */ public void deleteTable(final byte[] tableName) throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); @@ -342,7 +351,7 @@ public void deleteTable(final byte[] tableName) throws IOException { public TableListModel getTableList() throws IOException { StringBuilder path = new StringBuilder(); - path.append('/'); + path.append(pathPrefix); if (accessToken != null) { path.append(accessToken); path.append('/'); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 53b5742ca93d..c5db9a294926 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -93,12 +93,13 @@ public class RemoteHTable implements Table { final byte[] name; final int maxRetries; final long sleepTime; + private String pathPrefix = "/"; @SuppressWarnings("rawtypes") protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime, final long endTime, final int maxVersions) { - StringBuffer sb = new StringBuffer(); - sb.append('/'); + 
StringBuilder sb = new StringBuilder(); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append(toURLEncodedBytes(row)); @@ -159,7 +160,7 @@ protected String buildRowSpec(final byte[] row, final Map familyMap, final long protected String buildMultiRowSpec(final byte[][] rows, int maxVersions) { StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append("/multiget/"); if (rows == null || rows.length == 0) { @@ -242,6 +243,11 @@ public RemoteHTable(Client client, Configuration conf, byte[] name) { this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000); } + public RemoteHTable(Client client, Configuration conf, byte[] name, String pathPrefix) { + this(client, conf, name); + this.pathPrefix = pathPrefix + "/"; + } + public byte[] getTableName() { return name.clone(); } @@ -359,7 +365,7 @@ public boolean[] exists(List gets) throws IOException { public void put(Put put) throws IOException { CellSetModel model = buildModelFromPut(put); StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append(toURLEncodedBytes(put.getRow())); @@ -415,7 +421,7 @@ public void put(List puts) throws IOException { // build path for multiput StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append("/$multiput"); // can be any nonexistent row for (int i = 0; i < maxRetries; i++) { @@ -477,7 +483,7 @@ public void flushCommits() throws IOException { @Override public TableDescriptor getDescriptor() throws IOException { StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append("schema"); @@ -516,8 +522,8 @@ public Scanner(Scan scan) throws IOException { } catch (Exception e) { throw new IOException(e); } - StringBuffer sb = new StringBuffer(); - sb.append('/'); + StringBuilder sb = new StringBuilder(); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append("scanner"); @@ -688,7 +694,7 @@ private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[ CellSetModel model = buildModelFromPut(put); StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append(toURLEncodedBytes(put.getRow())); @@ -724,7 +730,7 @@ private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, by put.add(new KeyValue(row, family, qualifier, value)); CellSetModel model = buildModelFromPut(put); StringBuilder sb = new StringBuilder(); - sb.append('/'); + sb.append(pathPrefix); sb.append(Bytes.toString(name)); sb.append('/'); sb.append(toURLEncodedBytes(row)); From 5082212e04911310e540b3851b94cebae6593361 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 8 Jul 2024 19:28:44 -0400 Subject: [PATCH 449/514] HBASE-28672 Ensure large batches are not indefinitely blocked by quotas (#6003) Co-authored-by: Ray Mattingly Signed-off-by: Bryan Beaudreault < bbeaudreault@apache.org> Signed-off-by: Nick Dimiduk --- .../hbase/quotas/DefaultOperationQuota.java | 12 +- .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 20 +++ .../hadoop/hbase/quotas/QuotaLimiter.java | 13 ++ .../hadoop/hbase/quotas/TimeBasedLimiter.java | 26 +++ .../quotas/TestDefaultOperationQuota.java | 167 ++++++++++++++++++ 5 files changed, 236 insertions(+), 2 deletions(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 2e26765a6a19..a387c04e4e51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -111,8 +111,16 @@ private void checkQuota(long numWrites, long numReads) throws RpcThrottlingExcep continue; } - limiter.checkQuota(numWrites, writeConsumed, numReads, readConsumed, - writeCapacityUnitConsumed, readCapacityUnitConsumed); + long maxRequestsToEstimate = limiter.getRequestNumLimit(); + long maxReadsToEstimate = Math.min(maxRequestsToEstimate, limiter.getReadNumLimit()); + long maxWritesToEstimate = Math.min(maxRequestsToEstimate, limiter.getWriteNumLimit()); + long maxReadSizeToEstimate = Math.min(readConsumed, limiter.getReadLimit()); + long maxWriteSizeToEstimate = Math.min(writeConsumed, limiter.getWriteLimit()); + + limiter.checkQuota(Math.min(maxWritesToEstimate, numWrites), + Math.min(maxWriteSizeToEstimate, writeConsumed), Math.min(maxReadsToEstimate, numReads), + Math.min(maxReadSizeToEstimate, readConsumed), writeCapacityUnitConsumed, + readCapacityUnitConsumed); readAvailable = Math.min(readAvailable, limiter.getReadAvailable()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java index cf1e49c12e5c..5ece0be2b5aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java @@ -65,6 +65,21 @@ public long getWriteAvailable() { throw new UnsupportedOperationException(); } + @Override + public long getRequestNumLimit() { + return Long.MAX_VALUE; + } + + @Override + public long getReadNumLimit() { + return Long.MAX_VALUE; + } + + @Override + public long getWriteNumLimit() { + return Long.MAX_VALUE; + } + @Override public long getReadAvailable() { throw new UnsupportedOperationException(); @@ -75,6 +90,11 @@ public long getReadLimit() { return Long.MAX_VALUE; } + @Override + public long getWriteLimit() { + return Long.MAX_VALUE; + } + @Override public String toString() { return "NoopQuotaLimiter"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java index 8d00a702e253..12e4c4a7c6a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java @@ -79,6 +79,19 @@ void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize, /** Returns the maximum number of bytes ever available to read */ long getReadLimit(); + /** Returns the maximum number of bytes ever available to write */ + long getWriteLimit(); + /** Returns the number of bytes available to write to avoid exceeding the quota */ long getWriteAvailable(); + + /** Returns the maximum number of requests to allow per TimeUnit */ + long getRequestNumLimit(); + + /** Returns the maximum number of reads to allow per TimeUnit */ + long getReadNumLimit(); + + /** Returns the maximum number of writes to allow per TimeUnit */ + long getWriteNumLimit(); + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index e6e143343f72..f5170b09c83e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -240,6 +240,27 @@ public long getWriteAvailable() { return writeSizeLimiter.getAvailable(); } + @Override + public long getRequestNumLimit() { + long readAndWriteLimit = readReqsLimiter.getLimit() + writeReqsLimiter.getLimit(); + + if (readAndWriteLimit < 0) { // handle overflow + readAndWriteLimit = Long.MAX_VALUE; + } + + return Math.min(reqsLimiter.getLimit(), readAndWriteLimit); + } + + @Override + public long getReadNumLimit() { + return readReqsLimiter.getLimit(); + } + + @Override + public long getWriteNumLimit() { + return writeReqsLimiter.getLimit(); + } + @Override public long getReadAvailable() { return readSizeLimiter.getAvailable(); @@ -250,6 +271,11 @@ public long getReadLimit() { return Math.min(readSizeLimiter.getLimit(), reqSizeLimiter.getLimit()); } + @Override + public long getWriteLimit() { + return Math.min(writeSizeLimiter.getLimit(), reqSizeLimiter.getLimit()); + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java index 4684be02d69d..a6b7ba6fee59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java @@ -18,21 +18,37 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; + @Category({ RegionServerTests.class, SmallTests.class }) public class TestDefaultOperationQuota { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDefaultOperationQuota.class); + private static ManualEnvironmentEdge envEdge = new ManualEnvironmentEdge(); + static { + envEdge.setValue(EnvironmentEdgeManager.currentTime()); + // only active the envEdge for quotas package + EnvironmentEdgeManagerTestHelper.injectEdgeForPackage(envEdge, + ThrottleQuotaTestUtil.class.getPackage().getName()); + } + @Test public void testScanEstimateNewScanner() { long blockSize = 64 * 1024; @@ -125,4 +141,155 @@ public void testScanEstimateShrinkingWorkload() { // shrinking workload should only shrink estimate to maxBBS assertEquals(maxBlockBytesScanned, estimate); } + + @Test + public void testLargeBatchSaturatesReadNumLimit() + throws RpcThrottlingException, InterruptedException { + int limit = 10; + QuotaProtos.Throttle throttle = + 
QuotaProtos.Throttle.newBuilder().setReadNum(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536, limiter); + + // use the whole limit + quota.checkBatchQuota(0, limit); + + // the next request should be rejected + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(0, 1)); + + envEdge.incValue(1000); + // after the TimeUnit, the limit should be refilled + quota.checkBatchQuota(0, limit); + } + + @Test + public void testLargeBatchSaturatesReadWriteLimit() + throws RpcThrottlingException, InterruptedException { + int limit = 10; + QuotaProtos.Throttle throttle = + QuotaProtos.Throttle.newBuilder().setWriteNum(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536, limiter); + + // use the whole limit + quota.checkBatchQuota(limit, 0); + + // the next request should be rejected + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(1, 0)); + + envEdge.incValue(1000); + // after the TimeUnit, the limit should be refilled + quota.checkBatchQuota(limit, 0); + } + + @Test + public void testTooLargeReadBatchIsNotBlocked() + throws RpcThrottlingException, InterruptedException { + int limit = 10; + QuotaProtos.Throttle throttle = + QuotaProtos.Throttle.newBuilder().setReadNum(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536, limiter); + + // use more than the limit, which should succeed rather than being indefinitely blocked + quota.checkBatchQuota(0, 10 + limit); + + // the next request should be blocked + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(0, 1)); + + envEdge.incValue(1000); + // even after the TimeUnit, the limit should not be refilled because we oversubscribed + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(0, limit)); + } + + @Test + public void testTooLargeWriteBatchIsNotBlocked() + throws RpcThrottlingException, InterruptedException { + int limit = 10; + QuotaProtos.Throttle throttle = + QuotaProtos.Throttle.newBuilder().setWriteNum(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536, limiter); + + // use more than the limit, which should succeed rather than being indefinitely blocked + quota.checkBatchQuota(10 + limit, 0); + + // the next request should be blocked + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(1, 0)); + + envEdge.incValue(1000); + // even after the TimeUnit, the limit should not be refilled because we oversubscribed + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(limit, 0)); + } + + @Test + public void testTooLargeWriteSizeIsNotBlocked() + throws RpcThrottlingException, InterruptedException { + int limit = 50; + QuotaProtos.Throttle throttle = + 
QuotaProtos.Throttle.newBuilder().setWriteSize(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536, limiter); + + // writes are estimated a 100 bytes, so this will use 2x the limit but should not be blocked + quota.checkBatchQuota(1, 0); + + // the next request should be blocked + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(1, 0)); + + envEdge.incValue(1000); + // even after the TimeUnit, the limit should not be refilled because we oversubscribed + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(limit, 0)); + } + + @Test + public void testTooLargeReadSizeIsNotBlocked() + throws RpcThrottlingException, InterruptedException { + long blockSize = 65536; + long limit = blockSize / 2; + QuotaProtos.Throttle throttle = + QuotaProtos.Throttle.newBuilder().setReadSize(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = + new DefaultOperationQuota(new Configuration(), (int) blockSize, limiter); + + // reads are estimated at 1 block each, so this will use ~2x the limit but should not be blocked + quota.checkBatchQuota(0, 1); + + // the next request should be blocked + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(0, 1)); + + envEdge.incValue(1000); + // even after the TimeUnit, the limit should not be refilled because we oversubscribed + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota((int) limit, 1)); + } + + @Test + public void testTooLargeRequestSizeIsNotBlocked() + throws RpcThrottlingException, InterruptedException { + long blockSize = 65536; + long limit = blockSize / 2; + QuotaProtos.Throttle throttle = + QuotaProtos.Throttle.newBuilder().setReqSize(QuotaProtos.TimedQuota.newBuilder() + .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build(); + QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle); + DefaultOperationQuota quota = + new DefaultOperationQuota(new Configuration(), (int) blockSize, limiter); + + // reads are estimated at 1 block each, so this will use ~2x the limit but should not be blocked + quota.checkBatchQuota(0, 1); + + // the next request should be blocked + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota(0, 1)); + + envEdge.incValue(1000); + // even after the TimeUnit, the limit should not be refilled because we oversubscribed + assertThrows(RpcThrottlingException.class, () -> quota.checkBatchQuota((int) limit, 1)); + } } From 2a4c7cd7c89610345e2f2777977996cda3101ac8 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 9 Jul 2024 15:33:26 +0800 Subject: [PATCH 450/514] HBASE-28718 Should support different license name for 'Apache License, Version 2.0' (#6056) Signed-off-by: GeorryHuang --- .../src/main/resources/META-INF/LICENSE.vm | 11 +- .../main/resources/supplemental-models.xml | 468 +----------------- 2 files changed, 12 insertions(+), 467 deletions(-) diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm index afbf0b8842c3..afa1a9f50cf9 100644 --- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm +++ 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm @@ -1465,7 +1465,16 @@ project website ${dep.url} project source ${dep.scm.url} -#elseif( !(${dep.licenses[0].name.contains("Apache License, Version 2.0")} || ${dep.licenses[0].name.contains("The Apache Software License, Version 2.0")}) ) +#* + "Apache License, Version 2.0" is the official name in the past + "The Apache Software License, Version 2.0" is the official name in the past too + "Apache Software License - Version 2.0" is what jetty uses in pom + "Apache-2.0" is the new short official name +*# +#elseif( !(${dep.licenses[0].name.contains("Apache License, Version 2.0")} + || ${dep.licenses[0].name.contains("The Apache Software License, Version 2.0")} + || ${dep.licenses[0].name.contains("Apache Software License - Version 2.0")} + || ${dep.licenses[0].name.contains("Apache-2.0")}) ) #if( ${dep.licenses[0].name.contains("CDDL")} ) #if( ${dep.licenses[0].name.contains("1.0")} ) #set($aggregated = $cddl_1_0.add($dep)) diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml index 5dbdd7b42556..075463af20d8 100644 --- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml +++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml @@ -41,242 +41,6 @@ under the License. - - - org.apache.zookeeper - zookeeper - - - The Apache Software Foundation - http://www.apache.org/ - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - commons-beanutils - commons-beanutils - - - The Apache Software Foundation - http://www.apache.org/ - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - commons-beanutils - commons-beanutils-core - - - The Apache Software Foundation - http://www.apache.org/ - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - - org.eclipse.jetty - jetty-http - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-io - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-jmx - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-server - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-servlet - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-util - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-util-ajax - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-webapp - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-xml - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-security - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.orbit - javax.servlet.jsp.jstl - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.orbit - 
org.eclipse.jdt.core - - - - Eclipse Public License 1.0 - http://www.eclipse.org/legal/epl-v10.html - repo - - - - - - - org.eclipse.jetty.toolchain - jetty-schemas - - - Common Development and Distribution License (CDDL) v1.0 - https://glassfish.dev.java.net/public/CDDLv1.0.html - repo - - - - org.javassist @@ -290,37 +54,8 @@ under the License. - - - org.xerial.snappy - snappy-java - 1.1.8.4 - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - - - commons-httpclient - commons-httpclient - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - + javax.annotation @@ -555,34 +290,6 @@ under the License. - - - org.mortbay.jetty - jetty-util - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.mortbay.jetty - jetty-sslengine - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - org.bouncycastle @@ -601,179 +308,8 @@ under the License. - - - org.eclipse.jetty - jetty-annotations - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-jndi - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-plus - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty - jetty-client - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - javax-websocket-client-impl - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - javax-websocket-server-impl - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - websocket-api - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - websocket-client - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - websocket-common - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - websocket-server - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - org.eclipse.jetty.websocket - websocket-servlet - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - - - - - - - - org.mortbay.jetty - jetty - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - + io.dropwizard.metrics From b70bd11b8bdff41c3140b9aeeb1ad91d1a1c39d9 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 10 Jul 2024 10:17:37 +0800 Subject: [PATCH 451/514] HBASE-28714 Hadoop check for hadoop 3.4.0 is failing (#6063) Signed-off-by: Nick Dimiduk --- .../asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java | 4 ++-- .../FanOutOneBlockAsyncDFSOutputSaslHelper.java | 12 +++++++++--- .../hadoop/hbase/io/asyncfs/ProtobufDecoder.java | 12 +++++++----- .../hadoop/hbase/client/TestFlushFromClient.java | 5 +++-- pom.xml | 8 ++++++++ 5 files changed, 29 insertions(+), 12 deletions(-) diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java 
b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index d4a71a77a79d..7a4f624e6e06 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -30,7 +30,6 @@ import static org.apache.hbase.thirdparty.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE; -import com.google.protobuf.CodedOutputStream; import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.InvocationTargetException; @@ -92,6 +91,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator; @@ -351,7 +351,7 @@ private static void requestWriteBlock(Channel channel, StorageType storageType, writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); int protoLen = proto.getSerializedSize(); ByteBuf buffer = - channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen); + channel.alloc().buffer(3 + CodedOutputStream.computeUInt32SizeNoTag(protoLen) + protoLen); buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); buffer.writeByte(Op.WRITE_BLOCK.code); proto.writeDelimitedTo(new ByteBufOutputStream(buffer)); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java index 00b6631379bc..4f5ae5b22a98 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE; -import com.google.protobuf.CodedOutputStream; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.Field; @@ -81,6 +80,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; import org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf; @@ -391,7 +391,7 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, Class builderClass = DataTransferEncryptorMessageProto.Builder.class; // Try the unrelocated ByteString - Class byteStringClass = com.google.protobuf.ByteString.class; + Class byteStringClass; try { // See if it can load the relocated ByteString, which comes from hadoop-thirdparty. 
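// Hadoop 3.3.0 and above relocate protobuf under org.apache.hadoop.thirdparty, so that class is
// tried first; the catch block below falls back to the unrelocated com.google.protobuf.ByteString
// shipped by older Hadoop releases.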
byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); @@ -400,6 +400,12 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, } catch (ClassNotFoundException e) { LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + " Assuming this is below Hadoop 3.3.0", e); + try { + byteStringClass = Class.forName("com.google.protobuf.ByteString"); + LOG.debug("com.google.protobuf.ByteString found."); + } catch (ClassNotFoundException ex) { + throw new RuntimeException(ex); + } } // LiteralByteString is a package private class in protobuf. Make it accessible. @@ -446,7 +452,7 @@ private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload, } DataTransferEncryptorMessageProto proto = builder.build(); int size = proto.getSerializedSize(); - size += CodedOutputStream.computeRawVarint32Size(size); + size += CodedOutputStream.computeUInt32SizeNoTag(size); ByteBuf buf = ctx.alloc().buffer(size); proto.writeDelimitedTo(new ByteBufOutputStream(buf)); safeWrite(ctx, buf); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java index a0b5cc00841b..35344708b36d 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java @@ -109,11 +109,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) static { boolean hasParser = false; - // These are the protobuf classes coming from Hadoop. Not the one from hbase-shaded-protobuf - protobufMessageLiteClass = com.google.protobuf.MessageLite.class; - protobufMessageLiteBuilderClass = com.google.protobuf.MessageLite.Builder.class; - try { protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); protobufMessageLiteBuilderClass = @@ -121,6 +117,12 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) LOG.debug("Hadoop 3.3 and above shades protobuf."); } catch (ClassNotFoundException e) { LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); + try { + protobufMessageLiteClass = Class.forName("com.google.protobuf.MessageLite"); + protobufMessageLiteBuilderClass = Class.forName("com.google.protobuf.MessageLite$Builder"); + } catch (ClassNotFoundException ex) { + throw new RuntimeException("can not initialize protobuf related classes for hadoop", ex); + } } try { @@ -130,7 +132,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) hasParser = true; } catch (NoSuchMethodException e) { // If the method is not found, we are in trouble. Abort. 
- throw new RuntimeException(e); + throw new RuntimeException("can not initialize protobuf related classes for hadoop", e); } HAS_PARSER = hasParser; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java index 4756a5b22304..5c78aa9b612c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.io.IOUtils; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -47,6 +46,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestFlushFromClient { @@ -76,7 +77,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.cleanup(null, asyncConn); + Closeables.close(asyncConn, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/pom.xml b/pom.xml index 1da1648e1355..a35307149d94 100644 --- a/pom.xml +++ b/pom.xml @@ -3937,6 +3937,10 @@ org.slf4j slf4j-reload4j + + org.bouncycastle + bcprov-jdk15on + @@ -3994,6 +3998,10 @@ javax.ws.rs jsr311-api + + org.bouncycastle + bcprov-jdk15on + From 6f3b8c8bda3960e3c01960c197f832b938a4b8df Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 10 Jul 2024 10:33:34 +0800 Subject: [PATCH 452/514] HBASE-28712 Add JDK17 in the support matrix in our ref guide (#6051) Signed-off-by: Bryan Beaudreault < bbeaudreault@apache.org> --- .../asciidoc/_chapters/configuration.adoc | 62 +++++++++++++------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 9a95eb22a481..8a732fd0e535 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -130,62 +130,86 @@ JVM. When selecting your JDK distribution, please take this into consideration. 
==== .Java support by release line -[cols="5*^.^", options="header"] +[cols="6*^.^", options="header"] |=== |HBase Version |JDK 6 |JDK 7 |JDK 8 |JDK 11 +|JDK 17 -|HBase 2.3+ -|icon:times-circle[role="red"] -|icon:times-circle[role="red"] +|HBase 2.6 +2+|icon:times-circle[role="red"] +|icon:check-circle[role="green"] +|icon:check-circle[role="green"] +|icon:check-circle[role="green"] + + +|HBase 2.5 +2+|icon:times-circle[role="red"] +|icon:check-circle[role="green"] |icon:check-circle[role="green"] |icon:exclamation-circle[role="yellow"]* -|HBase 2.0-2.2 -|icon:times-circle[role="red"] +|HBase 2.4 +2+|icon:times-circle[role="red"] +|icon:check-circle[role="green"] +|icon:check-circle[role="green"] |icon:times-circle[role="red"] + +|HBase 2.3 +2+|icon:times-circle[role="red"] |icon:check-circle[role="green"] +|icon:exclamation-circle[role="yellow"]* |icon:times-circle[role="red"] +|HBase 2.0-2.2 +2+|icon:times-circle[role="red"] +|icon:check-circle[role="green"] +2+|icon:times-circle[role="red"] + |HBase 1.2+ |icon:times-circle[role="red"] |icon:check-circle[role="green"] |icon:check-circle[role="green"] -|icon:times-circle[role="red"] +2+|icon:times-circle[role="red"] |HBase 1.0-1.1 |icon:times-circle[role="red"] |icon:check-circle[role="green"] |icon:exclamation-circle[role="yellow"] -|icon:times-circle[role="red"] +2+|icon:times-circle[role="red"] |HBase 0.98 |icon:check-circle[role="green"] |icon:check-circle[role="green"] |icon:exclamation-circle[role="yellow"] -|icon:times-circle[role="red"] +2+|icon:times-circle[role="red"] |HBase 0.94 |icon:check-circle[role="green"] |icon:check-circle[role="green"] -|icon:times-circle[role="red"] -|icon:times-circle[role="red"] +3+|icon:times-circle[role="red"] |=== -.A Note on JDK11 icon:exclamation-circle[role="yellow"]* +.A Note on JDK11/JDK17 icon:exclamation-circle[role="yellow"]* [WARNING] ==== -Preliminary support for JDK11 is introduced with HBase 2.3.0. This support is limited to -compilation and running the full test suite. There are open questions regarding the runtime -compatibility of JDK11 with Apache ZooKeeper and Apache Hadoop -(link:https://issues.apache.org/jira/browse/HADOOP-15338[HADOOP-15338]). Significantly, neither -project has yet released a version with explicit runtime support for JDK11. The remaining known -issues in HBase are catalogued in -link:https://issues.apache.org/jira/browse/HBASE-22972[HBASE-22972]. +Preliminary support for JDK11 is introduced with HBase 2.3.0, and for JDK17 is introduced with +HBase 2.5.x. We will compile and run test suites with JDK11/17 in pre commit checks and nightly +checks. We will mark the support as icon:check-circle[role="green"] as long as we have run some +ITs with the JDK version and also there are users in the community use the JDK version in real +production clusters. + +For JDK11/JDK17 support in HBase, please refer to +link:https://issues.apache.org/jira/browse/HBASE-22972[HBASE-22972] and +link:https://issues.apache.org/jira/browse/HBASE-26038[HBASE-26038] + +For JDK11/JDK17 support in Hadoop, which may also affect HBase, please refer to +link:https://issues.apache.org/jira/browse/HADOOP-15338[HADOOP-15338] and +link:https://issues.apache.org/jira/browse/HADOOP-17177[HADOOP-17177] ==== NOTE: You must set `JAVA_HOME` on each node of your cluster. 
_hbase-env.sh_ provides a handy From 18e1cc53d3133de46d0f9adbefc690fa06402242 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 10 Jul 2024 10:34:55 +0800 Subject: [PATCH 453/514] HBASE-28668 Add documentation about specifying connection uri in replication and map reduce jobs (#6009) Signed-off-by: Bryan Beaudreault < bbeaudreault@apache.org> Signed-off-by: Nick Dimiduk --- src/main/asciidoc/_chapters/ops_mgt.adoc | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index fbde391a0394..c8bad4e96f2a 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -475,8 +475,11 @@ Options: endtime end of the time range. Ignored if no starttime specified. versions number of cell versions to copy new.name new table's name + peer.uri The URI of the peer cluster peer.adr Address of the peer cluster given in the format hbase.zookeeer.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent + Do not take effect if peer.uri is specified + Deprecated, please use peer.uri instead families comma-separated list of families to copy To copy from cf1 to cf2, give sourceCfName:destCfName. To keep the same name, just give "cfName" @@ -498,10 +501,14 @@ For performance consider the following general options: -Dmapred.map.tasks.speculative.execution=false ---- +Starting from 3.0.0, we introduce a `peer.uri` option so the `peer.adr` option is deprecated. Please +use connection URI for specifying HBase clusters. For all previous versions, you should still use +the `peer.adr` option. + .Scanner Caching [NOTE] ==== -Caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration. +Caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration. ==== .Versions @@ -586,10 +593,18 @@ $ ./bin/hbase org.apache.hadoop.hbase.mapreduce.SyncTable --help Usage: SyncTable [options] Options: + sourceuri Cluster connection uri of the source table + (defaults to cluster in classpath's config) sourcezkcluster ZK cluster key of the source table (defaults to cluster in classpath's config) + Do not take effect if sourceuri is specifie + Deprecated, please use sourceuri instead + targeturi Cluster connection uri of the target table + (defaults to cluster in classpath's config) targetzkcluster ZK cluster key of the target table (defaults to cluster in classpath's config) + Do not take effect if targeturi is specified + Deprecated, please use targeturi instead dryrun if true, output counters but no writes (defaults to false) doDeletes if false, does not perform deletes @@ -613,6 +628,10 @@ Examples: $ bin/hbase org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase hdfs://nn:9000/hashes/tableA tableA tableA ---- +Starting from 3.0.0, we introduce `sourceuri` and `targeturi` options so `sourcezkcluster` and +`targetzkcluster` are deprecated. Please use connection URI for specifying HBase clusters. For all +previous versions, you should still use `sourcezkcluster` and `targetzkcluster`. + Cell comparison takes ROW/FAMILY/QUALIFIER/TIMESTAMP/VALUE into account for equality. When syncing at the target, missing cells will be added with original timestamp value from source. 
That may cause unexpected results after SyncTable completes, for example, if missing cells on target have a delete marker with a timestamp T2 (say, a bulk delete performed by mistake), but source cells timestamps have an @@ -2449,6 +2468,7 @@ The `Peers` Znode:: It consists of a list of all peer replication clusters, along with the status of each of them. The value of each peer is its cluster key, which is provided in the HBase Shell. The cluster key contains a list of ZooKeeper nodes in the cluster's quorum, the client port for the ZooKeeper quorum, and the base znode for HBase in HDFS on that cluster. + Starting from 3.0.0, you can also specify connection URI as a cluster key. See <> for more details about connection URI. The `RS` Znode:: The `rs` znode contains a list of WAL logs which need to be replicated. From 429d6bab62a7926afc6e84f282f15cab39598725 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 10 Jul 2024 10:39:33 +0200 Subject: [PATCH 454/514] HBASE-28717 Support FuzzyRowFilter in REST interface (#6060) Signed-off-by: Ankit Singhal --- .../hadoop/hbase/filter/FuzzyRowFilter.java | 27 +++++++ .../hadoop/hbase/rest/model/ScannerModel.java | 78 ++++++++++++++++++- .../hbase/rest/TestScannersWithFilters.java | 25 +++++- 3 files changed, 128 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index 244dc9c7ca5f..f8c35b465284 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -161,6 +161,33 @@ private boolean isPreprocessedMask(byte[] mask) { return true; } + /** + * Returns the Fuzzy keys in the format expected by the constructor. + * @return the Fuzzy keys in the format expected by the constructor + */ + public List> getFuzzyKeys() { + List> returnList = new ArrayList<>(fuzzyKeysData.size()); + for (Pair fuzzyKey : fuzzyKeysData) { + Pair returnKey = new Pair<>(); + // This won't revert the original key's don't care values, but we don't care. + returnKey.setFirst(Arrays.copyOf(fuzzyKey.getFirst(), fuzzyKey.getFirst().length)); + byte[] returnMask = Arrays.copyOf(fuzzyKey.getSecond(), fuzzyKey.getSecond().length); + if (UNSAFE_UNALIGNED && isPreprocessedMask(returnMask)) { + // Revert the preprocessing. 
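+          // When unsafe unaligned access is available the mask bytes were preprocessed in place
+          // (0 stored as -1 and 1 stored as 2), so map them back to the 0/1 encoding that the
+          // constructor expects before handing the copy to the caller.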
+ for (int i = 0; i < returnMask.length; i++) { + if (returnMask[i] == -1) { + returnMask[i] = 0; // -1 >> 0 + } else if (returnMask[i] == 2) { + returnMask[i] = 1;// 2 >> 1 + } + } + } + returnKey.setSecond(returnMask); + returnList.add(returnKey); + } + return returnList; + } + @Override public void reset() throws IOException { filterRow = false; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index a18a9ba427ab..1ccb541c8876 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.FuzzyRowFilter; import org.apache.hadoop.hbase.filter.InclusiveStopFilter; import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; @@ -70,6 +71,7 @@ import org.apache.hadoop.hbase.rest.RestUtil; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; @@ -279,6 +281,63 @@ public boolean equals(Object obj) { } + static class FuzzyKeyModel { + + protected byte[] key; + + protected byte[] mask; + + public FuzzyKeyModel() { + } + + public FuzzyKeyModel(Pair keyWithMask) { + this.key = keyWithMask.getFirst(); + this.mask = keyWithMask.getSecond(); + } + + public Pair build() { + return new Pair<>(key, mask); + } + + public byte[] getKey() { + return key; + } + + public void setKey(byte[] key) { + this.key = key; + } + + public byte[] getMask() { + return mask; + } + + public void setMask(byte[] mask) { + this.mask = mask; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(key); + result = prime * result + Arrays.hashCode(mask); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof FuzzyKeyModel)) { + return false; + } + FuzzyKeyModel other = (FuzzyKeyModel) obj; + return Arrays.equals(key, other.key) && Arrays.equals(mask, other.mask); + } + + } + // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
@XmlAttribute @@ -321,6 +380,8 @@ public boolean equals(Object obj) { private List ranges; @XmlElement public List timestamps; + @XmlElement + private List fuzzyKeys; static enum FilterType { ColumnCountGetFilter, @@ -345,7 +406,8 @@ static enum FilterType { SkipFilter, TimestampsFilter, ValueFilter, - WhileMatchFilter + WhileMatchFilter, + FuzzyRowFilter } public FilterModel() { @@ -458,6 +520,12 @@ public FilterModel(Filter filter) { this.filters = new ArrayList<>(); this.filters.add(new FilterModel(((WhileMatchFilter) filter).getFilter())); break; + case FuzzyRowFilter: + this.fuzzyKeys = new ArrayList<>(((FuzzyRowFilter) filter).getFuzzyKeys().size()); + for (Pair keyWithMask : ((FuzzyRowFilter) filter).getFuzzyKeys()) { + this.fuzzyKeys.add(new FuzzyKeyModel(keyWithMask)); + } + break; default: throw new RuntimeException("unhandled filter type " + type); } @@ -569,6 +637,14 @@ public Filter build() { case WhileMatchFilter: filter = new WhileMatchFilter(filters.get(0).build()); break; + case FuzzyRowFilter: { + ArrayList> fuzzyKeyArgs = new ArrayList<>(fuzzyKeys.size()); + for (FuzzyKeyModel keyModel : fuzzyKeys) { + fuzzyKeyArgs.add(keyModel.build()); + } + filter = new FuzzyRowFilter(fuzzyKeyArgs); + } + break; default: throw new RuntimeException("unhandled filter type: " + type); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 865a6a18cd0c..4b32e8a4c22a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.FilterList.Operator; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.FuzzyRowFilter; import org.apache.hadoop.hbase.filter.InclusiveStopFilter; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; import org.apache.hadoop.hbase.filter.PageFilter; @@ -71,6 +72,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -230,7 +232,7 @@ private static void verifyScan(Scan s, long expectedRows, long expectedKeys) thr int rows = cells.getRows().size(); assertEquals( - "Scanned too many rows! Only expected " + expectedRows + " total but scanned " + rows, + "Unexpected number of rows! 
Expected " + expectedRows + " total but scanned " + rows, expectedRows, rows); for (RowModel row : cells.getRows()) { int count = row.getCells().size(); @@ -978,4 +980,25 @@ public void testMultiRowRangeFilter() throws Exception { s.setFilter(new MultiRowRangeFilter(ranges)); verifyScan(s, expectedRows, expectedKeys); } + + @Test + public void testFuzzyRowFilter() throws Exception { + long expectedRows = 4; + long expectedKeys = colsPerRow; + List> fuzzyKeys = new ArrayList<>(); + + // Exact match for ROWS_ONE[0] (one row) + byte[] rowOneMask = new byte[ROWS_ONE[0].length]; + Arrays.fill(rowOneMask, (byte) 0); + fuzzyKeys.add(new Pair<>(ROWS_ONE[0], rowOneMask)); + // All ROW_TWO keys (three rows) + byte[] rowTwoMask = new byte[ROWS_TWO[0].length]; + Arrays.fill(rowTwoMask, (byte) 0); + rowTwoMask[rowTwoMask.length - 1] = (byte) 1; + fuzzyKeys.add(new Pair<>(ROWS_TWO[2], rowTwoMask)); + + Scan s = new Scan(); + s.setFilter(new FuzzyRowFilter(fuzzyKeys)); + verifyScan(s, expectedRows, expectedKeys); + } } From 2edd7e0f23de6490c63494635a11d0fd262bcc6e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 11 Jul 2024 22:02:40 +0800 Subject: [PATCH 455/514] HBASE-28684 Remove CellWrapper and use ExtendedCell internally in client side data structure (#6044) Signed-off-by: Xin Sun --- .../mapreduce/MapReduceHFileSplitterJob.java | 3 +- .../client/AllowPartialScanResultCache.java | 6 +- .../client/AsyncBatchRpcRetryingCaller.java | 10 +- .../hbase/client/BatchScanResultCache.java | 12 +- .../hadoop/hbase/client/ConnectionUtils.java | 5 +- .../apache/hadoop/hbase/client/Delete.java | 25 +- .../apache/hadoop/hbase/client/Increment.java | 11 +- .../apache/hadoop/hbase/client/Mutation.java | 222 ++-------- .../client/PackagePrivateFieldAccessor.java | 20 + .../org/apache/hadoop/hbase/client/Put.java | 21 +- .../apache/hadoop/hbase/client/Result.java | 65 ++- .../org/apache/hadoop/hbase/ipc/Call.java | 13 +- .../hadoop/hbase/ipc/CellBlockBuilder.java | 18 +- .../ipc/DelegatingHBaseRpcController.java | 8 +- .../hadoop/hbase/ipc/HBaseRpcController.java | 12 +- .../hbase/ipc/HBaseRpcControllerImpl.java | 25 +- .../hadoop/hbase/ipc/RpcConnection.java | 4 +- .../hbase/ipc/RpcControllerFactory.java | 12 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 5 +- .../shaded/protobuf/RequestConverter.java | 38 +- .../hadoop/hbase/client/TestMutation.java | 12 +- .../hbase/ipc/TestCellBlockBuilder.java | 28 +- .../hbase/ipc/TestHBaseRpcControllerImpl.java | 30 +- .../shaded/protobuf/TestProtobufUtil.java | 11 +- .../hadoop/hbase/ByteBufferKeyValue.java | 4 +- .../hadoop/hbase/CellComparatorImpl.java | 2 +- .../org/apache/hadoop/hbase/CellUtil.java | 25 +- .../hadoop/hbase/ExtendedCellScannable.java | 37 ++ .../hadoop/hbase/ExtendedCellScanner.java | 37 ++ .../org/apache/hadoop/hbase/KeyValue.java | 4 +- .../apache/hadoop/hbase/KeyValueTestUtil.java | 8 +- .../org/apache/hadoop/hbase/KeyValueUtil.java | 11 +- .../apache/hadoop/hbase/PrivateCellUtil.java | 289 +++++++++---- .../java/org/apache/hadoop/hbase/TagUtil.java | 6 +- .../hadoop/hbase/codec/BaseDecoder.java | 10 +- .../hadoop/hbase/codec/BaseEncoder.java | 4 +- .../apache/hadoop/hbase/codec/CellCodec.java | 6 +- .../hadoop/hbase/codec/CellCodecWithTags.java | 6 +- .../org/apache/hadoop/hbase/codec/Codec.java | 4 +- .../hadoop/hbase/codec/KeyValueCodec.java | 14 +- .../hbase/codec/KeyValueCodecWithTags.java | 10 +- .../hadoop/hbase/io/CellOutputStream.java | 4 +- ...ner.java => SizedExtendedCellScanner.java} | 6 +- .../hbase/io/encoding/EncodedDataBlock.java | 8 
+- .../hadoop/hbase/io/encoding/NoneEncoder.java | 4 +- .../hadoop/hbase/TestCellComparator.java | 2 +- .../org/apache/hadoop/hbase/TestCellUtil.java | 165 +------ .../hadoop/hbase/codec/TestCellCodec.java | 12 +- .../hbase/codec/TestCellCodecWithTags.java | 16 +- .../codec/TestKeyValueCodecWithTags.java | 4 +- .../client/TestRpcControllerFactory.java | 9 +- .../hbase/mapreduce/CellSerialization.java | 8 +- .../hbase/mapreduce/CellSortReducer.java | 18 +- .../mapreduce/ExtendedCellSerialization.java | 2 +- .../hbase/mapreduce/HFileOutputFormat2.java | 4 +- .../apache/hadoop/hbase/mapreduce/Import.java | 30 +- .../hadoop/hbase/mapreduce/PutCombiner.java | 52 +-- .../hbase/mapreduce/PutSortReducer.java | 11 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 3 +- .../hbase/util/MapReduceExtendedCell.java | 21 +- .../mapreduce/TestHFileOutputFormat2.java | 3 +- .../hbase/mapreduce/TestImportExport.java | 25 +- .../AsyncRegionReplicationRetryingCaller.java | 4 +- .../hbase/client/AsyncRegionServerAdmin.java | 11 +- .../hadoop/hbase/codec/MessageCodec.java | 6 +- .../hbase/io/hfile/HFilePrettyPrinter.java | 3 +- .../apache/hadoop/hbase/ipc/CallRunner.java | 6 +- .../hadoop/hbase/ipc/NettyServerCall.java | 4 +- .../hbase/ipc/NettyServerRpcConnection.java | 4 +- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 7 +- .../apache/hadoop/hbase/ipc/RpcServer.java | 3 +- .../hadoop/hbase/ipc/RpcServerInterface.java | 5 +- .../apache/hadoop/hbase/ipc/ServerCall.java | 12 +- .../hadoop/hbase/ipc/ServerRpcConnection.java | 6 +- .../hadoop/hbase/ipc/SimpleServerCall.java | 4 +- .../hbase/ipc/SimpleServerRpcConnection.java | 4 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 8 +- .../protobuf/ReplicationProtobufUtil.java | 35 +- .../hadoop/hbase/regionserver/HRegion.java | 22 +- .../regionserver/MobReferenceOnlyFilter.java | 5 +- .../hbase/regionserver/RSRpcServices.java | 34 +- .../regionserver/ReversedMobStoreScanner.java | 2 +- .../wal/AsyncProtobufLogWriter.java | 3 +- .../regionserver/wal/ProtobufLogWriter.java | 3 +- .../regionserver/wal/SecureWALCellCodec.java | 10 +- .../hbase/regionserver/wal/WALCellCodec.java | 8 +- .../ReplaySyncReplicationWALCallable.java | 7 +- .../security/access/AccessController.java | 12 +- .../hbase/security/access/AuthManager.java | 5 +- .../security/access/PermissionStorage.java | 3 +- .../visibility/VisibilityController.java | 15 +- ...estCustomPriorityRpcControllerFactory.java | 4 +- .../TestAllowPartialScanResultCache.java | 6 +- .../client/TestBatchScanResultCache.java | 14 +- .../hbase/client/TestFromClientSide.java | 5 +- .../client/TestIncrementsFromClientSide.java | 5 +- .../client/TestMalformedCellFromClient.java | 7 +- .../hadoop/hbase/client/TestResult.java | 27 +- .../hadoop/hbase/codec/CodecPerformance.java | 23 +- .../hbase/codec/TestCellMessageCodec.java | 12 +- .../TestPassCustomCellViaRegionObserver.java | 406 ------------------ .../TestPostIncrementAndAppendBeforeWAL.java | 13 +- .../hadoop/hbase/filter/TestFilterList.java | 7 +- .../hbase/io/compress/HFileTestBase.java | 4 +- .../TestBufferedDataBlockEncoder.java | 8 +- .../io/encoding/TestDataBlockEncoders.java | 10 +- .../hbase/io/hfile/TestHFileEncryption.java | 4 +- .../io/hfile/TestScannerFromBucketCache.java | 26 +- .../hfile/TestSeekBeforeWithInlineBlocks.java | 6 +- .../hadoop/hbase/io/hfile/TestSeekTo.java | 3 +- .../hadoop/hbase/ipc/AbstractTestIPC.java | 14 +- .../ipc/TestNettyChannelWritability.java | 11 +- .../hbase/ipc/TestProtobufRpcServiceImpl.java | 12 +- 
.../hadoop/hbase/master/MockRegionServer.java | 9 +- .../hbase/mob/TestMobStoreCompaction.java | 13 +- .../namequeues/TestNamedQueueRecorder.java | 6 +- .../hbase/namequeues/TestRpcLogDetails.java | 6 +- .../region/TestRegionProcedureStore.java | 6 +- .../protobuf/TestReplicationProtobuf.java | 10 +- .../regionserver/DataBlockEncodingTool.java | 10 +- .../EncodedSeekPerformanceTest.java | 5 +- .../hbase/regionserver/TestBlocksScanned.java | 6 +- .../hbase/regionserver/TestCellFlatSet.java | 12 +- .../hbase/regionserver/TestHMobStore.java | 12 +- .../hbase/regionserver/TestHRegion.java | 33 +- .../TestHRegionServerBulkLoad.java | 5 +- .../hadoop/hbase/regionserver/TestHStore.java | 34 +- .../hbase/regionserver/TestHStoreFile.java | 5 +- .../hbase/regionserver/TestKeepDeletes.java | 11 +- .../regionserver/TestMultiColumnScanner.java | 9 +- .../regionserver/TestReplicateToReplica.java | 4 +- ...estStoreFileScannerWithTagCompression.java | 4 +- .../hadoop/hbase/regionserver/TestTags.java | 20 +- .../compactions/TestCompactor.java | 4 +- .../replication/TestReplicationWithTags.java | 5 +- ...ExpAsStringVisibilityLabelServiceImpl.java | 5 +- .../TestVisibilityLabelsReplication.java | 12 +- .../hadoop/hbase/util/HFileTestUtil.java | 3 +- .../hadoop/hbase/thrift2/ThriftUtilities.java | 18 +- 139 files changed, 1154 insertions(+), 1491 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScannable.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScanner.java rename hbase-common/src/main/java/org/apache/hadoop/hbase/io/{SizedCellScanner.java => SizedExtendedCellScanner.java} (85%) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index 04180972885e..2c073c56f7eb 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -76,7 +77,7 @@ static class HFileCellMapper extends Mapper actions, int tries) { } private ClientProtos.MultiRequest buildReq(Map actionsByRegion, - List cells, Map indexMap) throws IOException { + List cells, Map indexMap) throws IOException { ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder(); ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder(); ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); @@ -382,7 +382,7 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr return; } ClientProtos.MultiRequest req; - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); // Map from a created RegionAction to the original index for a RowMutations within // the original list of actions. 
This will be used to process the results when there // is RowMutations/CheckAndMutate in the action list. @@ -398,7 +398,7 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr calcPriority(serverReq.getPriority(), tableName), tableName); controller.setRequestAttributes(requestAttributes); if (!cells.isEmpty()) { - controller.setCellScanner(createCellScanner(cells)); + controller.setCellScanner(PrivateCellUtil.createExtendedCellScanner(cells)); } stub.multi(controller, req, resp -> { if (controller.failed()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java index b0423c6c5cea..9c2b658dc9ac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java @@ -24,8 +24,8 @@ import java.util.ArrayList; import java.util.Deque; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -44,7 +44,7 @@ public class BatchScanResultCache implements ScanResultCache { // used to filter out the cells that already returned to user as we always start from the // beginning of a row when retry. - private Cell lastCell; + private ExtendedCell lastCell; private boolean lastResultPartial; @@ -59,7 +59,7 @@ public BatchScanResultCache(int batch) { } private void recordLastResult(Result result) { - lastCell = result.rawCells()[result.rawCells().length - 1]; + lastCell = result.rawExtendedCells()[result.rawExtendedCells().length - 1]; lastResultPartial = result.mayHaveMoreCellsInRow(); } @@ -80,7 +80,7 @@ private Result regroupResults(Result result) { if (numCellsOfPartialResults < batch) { return null; } - Cell[] cells = new Cell[batch]; + ExtendedCell[] cells = new ExtendedCell[batch]; int cellCount = 0; boolean stale = false; for (;;) { @@ -91,8 +91,8 @@ private Result regroupResults(Result result) { // We have more cells than expected, so split the current result int len = batch - cellCount; System.arraycopy(r.rawCells(), 0, cells, cellCount, len); - Cell[] remainingCells = new Cell[r.size() - len]; - System.arraycopy(r.rawCells(), len, remainingCells, 0, r.size() - len); + ExtendedCell[] remainingCells = new ExtendedCell[r.size() - len]; + System.arraycopy(r.rawExtendedCells(), len, remainingCells, 0, r.size() - len); partialResults.addFirst( Result.create(remainingCells, r.getExists(), r.isStale(), r.mayHaveMoreCellsInRow())); break; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 84acc6e4d398..cdb85845f035 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -232,7 +233,7 @@ static long calcEstimatedSize(Result rs) { return estimatedHeapSizeOfResult; } - static 
Result filterCells(Result result, Cell keepCellsAfter) { + static Result filterCells(Result result, ExtendedCell keepCellsAfter) { if (keepCellsAfter == null) { // do not need to filter return result; @@ -241,7 +242,7 @@ static Result filterCells(Result result, Cell keepCellsAfter) { if (!PrivateCellUtil.matchingRows(keepCellsAfter, result.getRow(), 0, result.getRow().length)) { return result; } - Cell[] rawCells = result.rawCells(); + ExtendedCell[] rawCells = result.rawExtendedCells(); int index = Arrays.binarySearch(rawCells, keepCellsAfter, CellComparator.getInstance()::compareWithoutRow); if (index < 0) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index f97db8a116d6..af1d06a6ac47 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.security.access.Permission; @@ -171,10 +172,8 @@ public Delete addFamily(final byte[] family) { * @return this for invocation chaining */ public Delete addFamily(final byte[] family, final long timestamp) { - if (timestamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); - } - List list = getCellList(family); + checkTimestamp(timestamp); + List list = getCellList(family); if (!list.isEmpty()) { list.clear(); } @@ -190,10 +189,8 @@ public Delete addFamily(final byte[] family, final long timestamp) { * @return this for invocation chaining */ public Delete addFamilyVersion(final byte[] family, final long timestamp) { - if (timestamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); - } - List list = getCellList(family); + checkTimestamp(ts); + List list = getCellList(family); list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); return this; } @@ -218,10 +215,8 @@ public Delete addColumns(final byte[] family, final byte[] qualifier) { * @return this for invocation chaining */ public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) { - if (timestamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); - } - List list = getCellList(family); + checkTimestamp(ts); + List list = getCellList(family); list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); return this; } @@ -247,10 +242,8 @@ public Delete addColumn(final byte[] family, final byte[] qualifier) { * @return this for invocation chaining */ public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) { - if (timestamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. 
ts=" + timestamp); - } - List list = getCellList(family); + checkTimestamp(ts); + List list = getCellList(family); KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.Delete); list.add(kv); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index aad853f8c06c..e1672cdff190 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.security.access.Permission; @@ -35,6 +36,8 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** * Used to perform Increment operations on a single row. *
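For context, the class touched by these Increment.java hunks is the public client `Increment` API; the change swaps its internal cell list to ExtendedCell without changing caller-visible behavior. A minimal usage sketch, with hypothetical row, family and qualifier names (`table` is assumed to be an open org.apache.hadoop.hbase.client.Table):

----
// Sketch only: the names below are illustrative, not taken from the patch.
Increment inc = new Increment(Bytes.toBytes("row1"));
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
Result result = table.increment(inc);
long updated = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter")));
----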
    @@ -114,10 +117,8 @@ public Increment add(Cell cell) throws IOException { * @return the Increment object */ public Increment addColumn(byte[] family, byte[] qualifier, long amount) { - if (family == null) { - throw new IllegalArgumentException("family cannot be null"); - } - List list = getCellList(family); + Preconditions.checkArgument(family != null, "family cannot be null"); + List list = getCellList(family); KeyValue kv = createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount)); list.add(kv); return this; @@ -224,7 +225,7 @@ public String toString() { } sb.append(", families="); boolean moreThanOne = false; - for (Map.Entry> entry : this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { if (moreThanOne) { sb.append("), "); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index 0be0325d499f..b638aa48dc85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -22,11 +22,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; -import java.util.Optional; import java.util.TreeMap; import java.util.UUID; import java.util.stream.Collectors; @@ -34,10 +32,10 @@ import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.IndividualBytesFieldCell; import org.apache.hadoop.hbase.KeyValue; @@ -66,7 +64,7 @@ @InterfaceAudience.Public public abstract class Mutation extends OperationWithAttributes - implements Row, CellScannable, HeapSize { + implements Row, ExtendedCellScannable, HeapSize { public static final long MUTATION_OVERHEAD = ClassSize.align( // This ClassSize.OBJECT + @@ -96,13 +94,12 @@ public abstract class Mutation extends OperationWithAttributes private static final String RETURN_RESULTS = "_rr_"; // TODO: row should be final - protected byte[] row = null; + protected byte[] row; protected long ts = HConstants.LATEST_TIMESTAMP; protected Durability durability = Durability.USE_DEFAULT; - // TODO: familyMap should be final // A Map sorted by column family. - protected NavigableMap> familyMap; + protected final NavigableMap> familyMap; /** * empty construction. We need this empty construction to keep binary compatibility. @@ -115,7 +112,7 @@ protected Mutation(Mutation clone) { super(clone); this.row = clone.getRow(); this.ts = clone.getTimestamp(); - this.familyMap = clone.getFamilyCellMap().entrySet().stream() + this.familyMap = clone.familyMap.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { throw new RuntimeException("collisions!!!"); }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); @@ -127,18 +124,23 @@ protected Mutation(Mutation clone) { * @param ts timestamp * @param familyMap the map to collect all cells internally. 
CAN'T be null */ + @SuppressWarnings({ "unchecked", "rawtypes" }) protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { this.row = Preconditions.checkNotNull(row); - if (row.length == 0) { - throw new IllegalArgumentException("Row can't be empty"); - } + Preconditions.checkArgument(row.length > 0, "Row can't be empty"); this.ts = ts; - this.familyMap = Preconditions.checkNotNull(familyMap); + // We do not allow other Cell types in HBase so here we just do a simple cast + this.familyMap = (NavigableMap) Preconditions.checkNotNull(familyMap); } + /** + * For client users: You should only use the return value as a + * {@link org.apache.hadoop.hbase.CellScanner}, {@link ExtendedCellScanner} is marked as + * IA.Private which means there is no guarantee about its API stability. + */ @Override - public CellScanner cellScanner() { - return CellUtil.createCellScanner(getFamilyCellMap()); + public ExtendedCellScanner cellScanner() { + return PrivateCellUtil.createExtendedCellScanner(familyMap); } /** @@ -147,13 +149,8 @@ public CellScanner cellScanner() { * @param family column family * @return a list of Cell objects, returns an empty list if one doesn't exist. */ - List getCellList(byte[] family) { - List list = getFamilyCellMap().get(family); - if (list == null) { - list = new ArrayList<>(); - getFamilyCellMap().put(family, list); - } - return list; + List getCellList(byte[] family) { + return familyMap.computeIfAbsent(family, k -> new ArrayList<>()); } /** @@ -218,7 +215,7 @@ public Map toMap(int maxCols) { map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : familyMap.entrySet()) { // map from this family to details for each cell affected within the family List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); @@ -227,7 +224,7 @@ public Map toMap(int maxCols) { continue; } // add details for each cell - for (Cell cell : entry.getValue()) { + for (ExtendedCell cell : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -250,7 +247,7 @@ public Map toMap(int maxCols) { return map; } - private static Map cellToStringMap(Cell c) { + private static Map cellToStringMap(ExtendedCell c) { Map stringMap = new HashMap<>(); stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); @@ -283,8 +280,9 @@ public Durability getDurability() { /** * Method for retrieving the put's familyMap */ + @SuppressWarnings({ "unchecked", "rawtypes" }) public NavigableMap> getFamilyCellMap() { - return this.familyMap; + return (NavigableMap) this.familyMap; } /** @@ -517,9 +515,7 @@ protected long extraHeapSize() { * Set the timestamp of the delete. */ public Mutation setTimestamp(long timestamp) { - if (timestamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. 
ts=" + timestamp); - } + checkTimestamp(timestamp); this.ts = timestamp; return this; } @@ -603,7 +599,7 @@ public List get(byte[] family, byte[] qualifier) { */ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, boolean ignoreValue) { - List list = getCellList(family); + List list = getCellList(family); if (list.isEmpty()) { return false; } @@ -613,7 +609,7 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, bo // F T => 2 // F F => 1 if (!ignoreTS && !ignoreValue) { - for (Cell cell : list) { + for (ExtendedCell cell : list) { if ( CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) && CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts @@ -692,6 +688,10 @@ static void checkRow(ByteBuffer row) { } } + protected final void checkTimestamp(long ts) { + Preconditions.checkArgument(ts >= 0, "Timestamp cannot be negative. ts=%s", ts); + } + Mutation add(Cell cell) throws IOException { // Checking that the row of the kv is the same as the mutation // TODO: It is fraught with risk if user pass the wrong row. @@ -714,9 +714,9 @@ Mutation add(Cell cell) throws IOException { } if (cell instanceof ExtendedCell) { - getCellList(family).add(cell); + getCellList(family).add((ExtendedCell) cell); } else { - getCellList(family).add(new CellWrapper(cell)); + throw new IllegalArgumentException("Unsupported cell type: " + cell.getClass().getName()); } return this; } @@ -743,7 +743,7 @@ public CellBuilder getCellBuilder() { * @param cellType e.g Cell.Type.Put * @return CellBuilder which already has relevant Type and Row set. */ - protected CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) { + protected final CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) { CellBuilder builder = CellBuilderFactory.create(cellBuilderType).setRow(row).setType(cellType); return new CellBuilder() { @Override @@ -818,158 +818,4 @@ public CellBuilder clear() { } }; } - - private static final class CellWrapper implements ExtendedCell { - private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT // object header - + KeyValue.TIMESTAMP_SIZE // timestamp - + Bytes.SIZEOF_LONG // sequence id - + 1 * ClassSize.REFERENCE); // references to cell - private final Cell cell; - private long sequenceId; - private long timestamp; - - CellWrapper(Cell cell) { - assert !(cell instanceof ExtendedCell); - this.cell = cell; - this.sequenceId = cell.getSequenceId(); - this.timestamp = cell.getTimestamp(); - } - - @Override - public void setSequenceId(long seqId) { - sequenceId = seqId; - } - - @Override - public void setTimestamp(long ts) { - timestamp = ts; - } - - @Override - public void setTimestamp(byte[] ts) { - timestamp = Bytes.toLong(ts); - } - - @Override - public long getSequenceId() { - return sequenceId; - } - - @Override - public byte[] getValueArray() { - return cell.getValueArray(); - } - - @Override - public int getValueOffset() { - return cell.getValueOffset(); - } - - @Override - public int getValueLength() { - return cell.getValueLength(); - } - - @Override - public byte[] getTagsArray() { - return cell.getTagsArray(); - } - - @Override - public int getTagsOffset() { - return cell.getTagsOffset(); - } - - @Override - public int getTagsLength() { - return cell.getTagsLength(); - } - - @Override - public byte[] getRowArray() { - return cell.getRowArray(); - } - - @Override - public int getRowOffset() { - return cell.getRowOffset(); - } - 
- @Override - public short getRowLength() { - return cell.getRowLength(); - } - - @Override - public byte[] getFamilyArray() { - return cell.getFamilyArray(); - } - - @Override - public int getFamilyOffset() { - return cell.getFamilyOffset(); - } - - @Override - public byte getFamilyLength() { - return cell.getFamilyLength(); - } - - @Override - public byte[] getQualifierArray() { - return cell.getQualifierArray(); - } - - @Override - public int getQualifierOffset() { - return cell.getQualifierOffset(); - } - - @Override - public int getQualifierLength() { - return cell.getQualifierLength(); - } - - @Override - public long getTimestamp() { - return timestamp; - } - - @Override - public byte getTypeByte() { - return cell.getTypeByte(); - } - - @Override - public Optional getTag(byte type) { - return PrivateCellUtil.getTag(cell, type); - } - - @Override - public Iterator getTags() { - return PrivateCellUtil.tagsIterator(cell); - } - - @Override - public byte[] cloneTags() { - return PrivateCellUtil.cloneTags(cell); - } - - private long heapOverhead() { - return FIXED_OVERHEAD + ClassSize.ARRAY // row - + getFamilyLength() == 0 - ? 0 - : ClassSize.ARRAY + getQualifierLength() == 0 ? 0 - : ClassSize.ARRAY + getValueLength() == 0 ? 0 - : ClassSize.ARRAY + getTagsLength() == 0 ? 0 - : ClassSize.ARRAY; - } - - @Override - public long heapSize() { - return heapOverhead() + ClassSize.align(getRowLength()) + ClassSize.align(getFamilyLength()) - + ClassSize.align(getQualifierLength()) + ClassSize.align(getValueLength()) - + ClassSize.align(getTagsLength()); - } - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java index 56a8dd19fcc5..08293ab83f81 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import java.util.List; +import java.util.NavigableMap; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -38,4 +41,21 @@ public static void setMvccReadPoint(Scan scan, long mvccReadPoint) { public static long getMvccReadPoint(Scan scan) { return scan.getMvccReadPoint(); } + + public static ExtendedCell[] getExtendedRawCells(Result result) { + return result.rawExtendedCells(); + } + + public static NavigableMap> getExtendedFamilyCellMap(Mutation m) { + return m.familyMap; + } + + public static Result createResult(ExtendedCell[] cells) { + return Result.create(cells); + } + + public static Result createResult(ExtendedCell[] cells, Boolean exists, boolean stale, + boolean mayHaveMoreCellsInRow) { + return Result.create(cells, exists, stale, mayHaveMoreCellsInRow); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index dc470069f90c..b97a023c3899 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import 
org.apache.hadoop.hbase.io.HeapSize; @@ -93,11 +94,9 @@ public Put(ByteBuffer row) { */ public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) { checkRow(rowArray, rowOffset, rowLength); + checkTimestamp(ts); this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; - if (ts < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); - } } /** @@ -119,9 +118,7 @@ public Put(byte[] row, boolean rowIsImmutable) { */ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Check and set timestamp - if (ts < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); - } + checkTimestamp(ts); this.ts = ts; // Deal with row according to rowIsImmutable @@ -171,10 +168,8 @@ public Put addColumn(byte[] family, byte[] qualifier, byte[] value) { * @param value column value */ public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { - if (ts < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); - } - List list = getCellList(family); + checkTimestamp(ts); + List list = getCellList(family); KeyValue kv = createPutKeyValue(family, qualifier, ts, value); list.add(kv); return this; @@ -189,10 +184,8 @@ public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { * @param value column value */ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { - if (ts < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); - } - List list = getCellList(family); + checkTimestamp(ts); + List list = getCellList(family); KeyValue kv = createPutKeyValue(family, qualifier, ts, value, null); list.add(kv); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index ec1d3f2b1fab..f4ac525e5b93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -32,9 +32,11 @@ import java.util.TreeMap; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -70,8 +72,8 @@ * {@link #copyFrom(Result)} */ @InterfaceAudience.Public -public class Result implements CellScannable, CellScanner { - private Cell[] cells; +public class Result implements ExtendedCellScannable, ExtendedCellScanner { + private ExtendedCell[] cells; private Boolean exists; // if the query was just to check existence. 
private boolean stale = false; @@ -142,7 +144,8 @@ public static Result create(List cells, Boolean exists, boolean if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); } - return new Result(cells.toArray(new Cell[cells.size()]), null, stale, mayHaveMoreCellsInRow); + return new Result(cells.toArray(new ExtendedCell[cells.size()]), null, stale, + mayHaveMoreCellsInRow); } /** @@ -159,6 +162,26 @@ public static Result create(Cell[] cells, Boolean exists, boolean stale) { } public static Result create(Cell[] cells, Boolean exists, boolean stale, + boolean mayHaveMoreCellsInRow) { + if (exists != null) { + return new Result(null, exists, stale, mayHaveMoreCellsInRow); + } + ExtendedCell[] extendCells = cells instanceof ExtendedCell[] + ? (ExtendedCell[]) cells + : Arrays.copyOf(cells, cells.length, ExtendedCell[].class); + return new Result(extendCells, null, stale, mayHaveMoreCellsInRow); + } + + // prefer these below methods inside hbase to avoid casting or copying + static Result create(ExtendedCell[] cells) { + return create(cells, null, false); + } + + static Result create(ExtendedCell[] cells, Boolean exists, boolean stale) { + return create(cells, exists, stale, false); + } + + static Result create(ExtendedCell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) { if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); @@ -176,7 +199,8 @@ private Result(Cursor cursor) { } /** Private ctor. Use {@link #create(Cell[])}. */ - private Result(Cell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) { + private Result(ExtendedCell[] cells, Boolean exists, boolean stale, + boolean mayHaveMoreCellsInRow) { this.cells = cells; this.exists = exists; this.stale = stale; @@ -212,6 +236,10 @@ public Cell[] rawCells() { return cells; } + ExtendedCell[] rawExtendedCells() { + return cells; + } + /** * Create a sorted list of the Cell's in this result. Since HBase 0.20.5 this is equivalent to * raw(). 
@@ -263,7 +291,7 @@ private byte[] notNullBytes(final byte[] bytes) { } } - protected int binarySearch(final Cell[] kvs, final byte[] family, final byte[] qualifier) { + private int binarySearch(final Cell[] kvs, final byte[] family, final byte[] qualifier) { byte[] familyNotNull = notNullBytes(family); byte[] qualifierNotNull = notNullBytes(qualifier); Cell searchTerm = PrivateCellUtil.createFirstOnRow(kvs[0].getRowArray(), kvs[0].getRowOffset(), @@ -294,7 +322,7 @@ protected int binarySearch(final Cell[] kvs, final byte[] family, final byte[] q * @param qlength qualifier length * @return the index where the value was found, or -1 otherwise */ - protected int binarySearch(final Cell[] kvs, final byte[] family, final int foffset, + private int binarySearch(final Cell[] kvs, final byte[] family, final int foffset, final int flength, final byte[] qualifier, final int qoffset, final int qlength) { double keyValueSize = @@ -734,12 +762,12 @@ public static void compareResults(Result res1, Result res2, boolean verbose) thr + ", " + res1.size() + " cells are compared to " + res2.size() + " cells"); } } - Cell[] ourKVs = res1.rawCells(); - Cell[] replicatedKVs = res2.rawCells(); + ExtendedCell[] ourKVs = res1.cells; + ExtendedCell[] replicatedKVs = res2.cells; for (int i = 0; i < res1.size(); i++) { if ( !ourKVs[i].equals(replicatedKVs[i]) || !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i]) - || !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i]) + || !PrivateCellUtil.matchingTags(ourKVs[i], replicatedKVs[i]) ) { if (verbose) { throw new Exception("This result was different: " + res1 + " compared to " + res2); @@ -824,18 +852,29 @@ public void copyFrom(Result other) { this.cells = other.cells; } + /** + * For client users: You should only use the return value as a + * {@link org.apache.hadoop.hbase.CellScanner}, {@link ExtendedCellScanner} is marked as + * IA.Private which means there is no guarantee about its API stability. + */ @Override - public CellScanner cellScanner() { + public ExtendedCellScanner cellScanner() { // Reset this.cellScannerIndex = INITIAL_CELLSCANNER_INDEX; return this; } + /** + * For client users: You should only use the return value as a {@link Cell}, {@link ExtendedCell} + * is marked as IA.Private which means there is no guarantee about its API stability. + */ @Override - public Cell current() { + public ExtendedCell current() { if ( isEmpty() || cellScannerIndex == INITIAL_CELLSCANNER_INDEX || cellScannerIndex >= cells.length - ) return null; + ) { + return null; + } return this.cells[cellScannerIndex]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java index 980e708d235c..45a9b29daba2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java @@ -22,7 +22,7 @@ import java.util.Map; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -44,7 +44,7 @@ class Call { * Optionally has cells when making call. Optionally has cells set on response. Used passing cells * to the rpc and receiving the response. 
*/ - CellScanner cells; + ExtendedCellScanner cells; @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Direct access is only allowed after done") Message response; // value, null if error @@ -63,9 +63,10 @@ class Call { final Span span; Timeout timeoutTask; - Call(int id, final Descriptors.MethodDescriptor md, Message param, final CellScanner cells, - final Message responseDefaultType, int timeout, int priority, Map attributes, - RpcCallback callback, MetricsConnection.CallStats callStats) { + Call(int id, final Descriptors.MethodDescriptor md, Message param, + final ExtendedCellScanner cells, final Message responseDefaultType, int timeout, int priority, + Map attributes, RpcCallback callback, + MetricsConnection.CallStats callStats) { this.param = param; this.md = md; this.cells = cells; @@ -136,7 +137,7 @@ public void setException(IOException error) { * @param response return value of the call. * @param cells Can be null */ - public void setResponse(Message response, final CellScanner cells) { + public void setResponse(Message response, final ExtendedCellScanner cells) { synchronized (this) { if (done) { return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java index e7364ca3b429..a52aa8693f68 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java @@ -26,8 +26,8 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffInputStream; @@ -110,7 +110,7 @@ public int size() { * been flipped and is ready for reading. Use limit to find total size. 
*/ public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner) throws IOException { + final ExtendedCellScanner cellScanner) throws IOException { ByteBufferOutputStreamSupplier supplier = new ByteBufferOutputStreamSupplier(); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { ByteBuffer bb = supplier.baos.getByteBuffer(); @@ -144,8 +144,8 @@ public int size() { } } - public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, CellScanner cellScanner, - ByteBufAllocator alloc) throws IOException { + public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, + ExtendedCellScanner cellScanner, ByteBufAllocator alloc) throws IOException { ByteBufOutputStreamSupplier supplier = new ByteBufOutputStreamSupplier(alloc); if (buildCellBlock(codec, compressor, cellScanner, supplier)) { return supplier.buf; @@ -155,7 +155,7 @@ public ByteBuf buildCellBlock(Codec codec, CompressionCodec compressor, CellScan } private boolean buildCellBlock(final Codec codec, final CompressionCodec compressor, - final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { + final ExtendedCellScanner cellScanner, OutputStreamSupplier supplier) throws IOException { if (cellScanner == null) { return false; } @@ -171,7 +171,7 @@ private boolean buildCellBlock(final Codec codec, final CompressionCodec compres return true; } - private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec, + private void encodeCellsTo(OutputStream os, ExtendedCellScanner cellScanner, Codec codec, CompressionCodec compressor) throws IOException { Compressor poolCompressor = null; try { @@ -212,7 +212,7 @@ private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec * @throws IOException if encoding the cells fail */ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor, - CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { + ExtendedCellScanner cellScanner, ByteBuffAllocator allocator) throws IOException { if (cellScanner == null) { return null; } @@ -235,7 +235,7 @@ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionC * @return CellScanner to work against the content of cellBlock * @throws IOException if encoding fails */ - public CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor, + public ExtendedCellScanner createCellScanner(final Codec codec, final CompressionCodec compressor, final byte[] cellBlock) throws IOException { // Use this method from Client side to create the CellScanner if (compressor != null) { @@ -258,7 +258,7 @@ public CellScanner createCellScanner(final Codec codec, final CompressionCodec c * out of the CellScanner will share the same ByteBuffer being passed. 
* @throws IOException if cell encoding fails */ - public CellScanner createCellScannerReusingBuffers(final Codec codec, + public ExtendedCellScanner createCellScannerReusingBuffers(final Codec codec, final CompressionCodec compressor, ByteBuff cellBlock) throws IOException { // Use this method from HRS to create the CellScanner // If compressed, decompress it first before passing it on else we will leak compression diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java index 2b8839bf8462..5b220a24ec56 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.Map; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -74,12 +74,12 @@ public void notifyOnCancel(RpcCallback callback) { } @Override - public CellScanner cellScanner() { + public ExtendedCellScanner cellScanner() { return delegate.cellScanner(); } @Override - public void setCellScanner(CellScanner cellScanner) { + public void setCellScanner(ExtendedCellScanner cellScanner) { delegate.setCellScanner(cellScanner); } @@ -134,7 +134,7 @@ public IOException getFailed() { } @Override - public void setDone(CellScanner cellScanner) { + public void setDone(ExtendedCellScanner cellScanner) { delegate.setDone(cellScanner); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index 4d3e038bb5ec..8fe44ca59cfc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -19,8 +19,8 @@ import java.io.IOException; import java.util.Map; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -42,13 +42,13 @@ @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.REPLICATION }) @InterfaceStability.Evolving -public interface HBaseRpcController extends RpcController, CellScannable { +public interface HBaseRpcController extends RpcController, ExtendedCellScannable { /** * Only used to send cells to rpc server, the returned cells should be set by - * {@link #setDone(CellScanner)}. + * {@link #setDone(ExtendedCellScanner)}. */ - void setCellScanner(CellScanner cellScanner); + void setCellScanner(ExtendedCellScanner cellScanner); /** * Set the priority for this operation. @@ -97,7 +97,7 @@ public interface HBaseRpcController extends RpcController, CellScannable { * IMPORTANT: always call this method if the call finished without any exception to tell * the {@code HBaseRpcController} that we are done. 
*/ - void setDone(CellScanner cellScanner); + void setDone(ExtendedCellScanner cellScanner); /** * A little different from the basic RpcController: diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java index 54e9310b5ae7..0667ce2ee627 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java @@ -22,10 +22,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -72,28 +72,29 @@ public class HBaseRpcControllerImpl implements HBaseRpcController { * sometimes the scanner is backed by a List of Cells and other times, it is backed by an encoded * block that implements CellScanner. */ - private CellScanner cellScanner; + private ExtendedCellScanner cellScanner; private Map requestAttributes = Collections.emptyMap(); public HBaseRpcControllerImpl() { - this(null, (CellScanner) null); + this(null, (ExtendedCellScanner) null); } /** * Used server-side. Clients should go via {@link RpcControllerFactory} */ - public HBaseRpcControllerImpl(final CellScanner cellScanner) { + public HBaseRpcControllerImpl(final ExtendedCellScanner cellScanner) { this(null, cellScanner); } - HBaseRpcControllerImpl(RegionInfo regionInfo, final CellScanner cellScanner) { + HBaseRpcControllerImpl(RegionInfo regionInfo, final ExtendedCellScanner cellScanner) { this.cellScanner = cellScanner; this.regionInfo = regionInfo; } - HBaseRpcControllerImpl(RegionInfo regionInfo, final List cellIterables) { - this.cellScanner = cellIterables == null ? null : CellUtil.createCellScanner(cellIterables); + HBaseRpcControllerImpl(RegionInfo regionInfo, final List cellIterables) { + this.cellScanner = + cellIterables == null ? 
null : PrivateCellUtil.createExtendedCellScanner(cellIterables); this.regionInfo = null; } @@ -109,14 +110,14 @@ public RegionInfo getRegionInfo() { /** Returns One-shot cell scanner (you cannot back it up and restart) */ @Override - public CellScanner cellScanner() { + public ExtendedCellScanner cellScanner() { return cellScanner; } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "The only possible race method is startCancel") @Override - public void setCellScanner(final CellScanner cellScanner) { + public void setCellScanner(final ExtendedCellScanner cellScanner) { this.cellScanner = cellScanner; } @@ -240,7 +241,7 @@ public synchronized IOException getFailed() { } @Override - public synchronized void setDone(CellScanner cellScanner) { + public synchronized void setDone(ExtendedCellScanner cellScanner) { if (done) { return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 8017e99ec4ff..dbdb0e2037f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -34,7 +34,7 @@ import java.util.function.Consumer; import javax.security.sasl.SaslException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; @@ -382,7 +382,7 @@ private void finishCall(ResponseHeader respo } else { value = null; } - CellScanner cellBlockScanner; + ExtendedCellScanner cellBlockScanner; if (responseHeader.hasCellBlockMeta()) { int size = responseHeader.getCellBlockMeta().getLength(); // Maybe we could read directly from the ByteBuf. 
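The controller and connection changes above all follow the same shape: the public CellScanner-facing contract stays in place, while the internal interfaces narrow to the richer ExtendedCell view through covariant overrides, so internal callers never have to cast. The following is a minimal, self-contained sketch of that pattern only; the names here (BasicCell, RicherCell, RicherScanner, scannerOf) are illustrative stand-ins and not the real HBase types.

    import java.util.Iterator;
    import java.util.List;

    public class CovariantScannerSketch {

      interface BasicCell { byte[] getValue(); }

      // The "extended" view adds internal-only accessors (tags, in the real patch).
      interface RicherCell extends BasicCell { byte[] getTags(); }

      interface BasicScanner { BasicCell current(); boolean advance(); }

      // Covariant override: code holding the narrower interface gets the richer
      // cell type back without a cast.
      interface RicherScanner extends BasicScanner {
        @Override
        RicherCell current();
      }

      // A trivial scanner over a list, returned as the narrow type.
      static RicherScanner scannerOf(List<RicherCell> cells) {
        Iterator<RicherCell> it = cells.iterator();
        return new RicherScanner() {
          private RicherCell cur;
          @Override public RicherCell current() { return cur; }
          @Override public boolean advance() {
            if (it.hasNext()) { cur = it.next(); return true; }
            cur = null;
            return false;
          }
        };
      }

      public static void main(String[] args) {
        RicherCell cell = new RicherCell() {
          @Override public byte[] getValue() { return new byte[] { 1 }; }
          @Override public byte[] getTags() { return new byte[0]; }
        };
        RicherScanner scanner = scannerOf(List.of(cell));
        while (scanner.advance()) {
          // No cast needed to reach the internal-only accessor.
          System.out.println("tags length = " + scanner.current().getTags().length);
        }
      }
    }
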
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java index a256769de703..1977f09abb11 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java @@ -19,8 +19,8 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -54,20 +54,20 @@ public HBaseRpcController newController() { return new HBaseRpcControllerImpl(); } - public HBaseRpcController newController(CellScanner cellScanner) { + public HBaseRpcController newController(ExtendedCellScanner cellScanner) { return new HBaseRpcControllerImpl(null, cellScanner); } - public HBaseRpcController newController(RegionInfo regionInfo, CellScanner cellScanner) { + public HBaseRpcController newController(RegionInfo regionInfo, ExtendedCellScanner cellScanner) { return new HBaseRpcControllerImpl(regionInfo, cellScanner); } - public HBaseRpcController newController(final List cellIterables) { + public HBaseRpcController newController(final List cellIterables) { return new HBaseRpcControllerImpl(null, cellIterables); } public HBaseRpcController newController(RegionInfo regionInfo, - final List cellIterables) { + final List cellIterables) { return new HBaseRpcControllerImpl(regionInfo, cellIterables); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d48b8b73a316..d3672c5e841b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -245,7 +246,7 @@ private ProtobufUtil() { * Many results are simple: no cell, exists true or false. To save on object creations, we reuse * them across calls. 
*/ - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; + private final static ExtendedCell[] EMPTY_CELL_ARRAY = new ExtendedCell[0]; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); @@ -2024,7 +2025,7 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { return UnsafeByteOperations.unsafeWrap(dup); } - public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, + public static ExtendedCell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, boolean decodeTags) { ExtendedCellBuilder builder = cellBuilder.clear().setRow(cell.getRow().toByteArray()) .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index ce12aaea0d24..b98094ad92a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -27,11 +27,11 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterMetricsBuilder; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScannable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; @@ -469,7 +469,7 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( * RowMutations/CheckAndMutate within the original list of actions */ public static void buildNoDataRegionActions(final byte[] regionName, - final Iterable actions, final List cells, + final Iterable actions, final List cells, final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, long nonceGroup, final Map indexMap) throws IOException { @@ -609,17 +609,19 @@ public static void buildNoDataRegionActions(final byte[] regionName, } } - private static void buildNoDataRegionAction(final Put put, final List cells, - final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Put put, + final List cells, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(put); regionActionBuilder.addAction(actionBuilder .setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); } - private static void buildNoDataRegionAction(final Delete delete, final List cells, - final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Delete delete, + final List cells, final 
RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { int size = delete.size(); // Note that a legitimate Delete may have a size of zero; i.e. a Delete that has nothing // in it but the row to delete. In this case, the current implementation does not make @@ -637,18 +639,18 @@ private static void buildNoDataRegionAction(final Delete delete, final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + final List cells, long nonce, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { cells.add(increment); regionActionBuilder.addAction(actionBuilder.setMutation( ProtobufUtil.toMutationNoData(MutationType.INCREMENT, increment, mutationBuilder, nonce))); } - private static void buildNoDataRegionAction(final Append append, final List cells, - long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + private static void buildNoDataRegionAction(final Append append, + final List cells, long nonce, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { cells.add(append); regionActionBuilder.addAction(actionBuilder.setMutation( ProtobufUtil.toMutationNoData(MutationType.APPEND, append, mutationBuilder, nonce))); @@ -656,9 +658,9 @@ private static void buildNoDataRegionAction(final Append append, final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + final List cells, long nonce, + final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, + final MutationProto.Builder mutationBuilder) throws IOException { boolean ret = false; for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index 718ca05b92f3..5da59b144a5f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java @@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -124,13 +126,13 @@ public void testPutCopyConstructor() throws IOException { private void assertEquals(Mutation origin, Mutation clone) { Assert.assertEquals(origin.getFamilyCellMap().size(), clone.getFamilyCellMap().size()); for (byte[] family : origin.getFamilyCellMap().keySet()) { - List originCells = origin.getCellList(family); - List cloneCells = clone.getCellList(family); + List originCells = 
origin.getCellList(family); + List cloneCells = clone.getCellList(family); Assert.assertEquals(originCells.size(), cloneCells.size()); for (int i = 0; i != cloneCells.size(); ++i) { - Cell originCell = originCells.get(i); - Cell cloneCell = cloneCells.get(i); - assertTrue(CellUtil.equals(originCell, cloneCell)); + ExtendedCell originCell = originCells.get(i); + ExtendedCell cloneCell = cloneCells.get(i); + assertTrue(PrivateCellUtil.equals(originCell, cloneCell)); assertTrue(CellUtil.matchingValue(originCell, cloneCell)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java index fccea923635a..c3a145ea6f33 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java @@ -21,18 +21,17 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Arrays; import org.apache.commons.lang3.time.StopWatch; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; -import org.apache.hadoop.hbase.io.SizedCellScanner; +import org.apache.hadoop.hbase.io.SizedExtendedCellScanner; import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -78,10 +77,9 @@ static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final Codec codec, final CompressionCodec compressor, final int count, final int size, final boolean sized) throws IOException { - Cell[] cells = getCells(count, size); - CellScanner cellScanner = sized - ? getSizedCellScanner(cells) - : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); + ExtendedCell[] cells = getCells(count, size); + ExtendedCellScanner cellScanner = + sized ? 
getSizedCellScanner(cells) : PrivateCellUtil.createExtendedCellScanner(cells); ByteBuffer bb = builder.buildCellBlock(codec, compressor, cellScanner); cellScanner = builder.createCellScannerReusingBuffers(codec, compressor, new SingleByteBuff(bb)); @@ -92,21 +90,21 @@ static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final assertEquals(count, i); } - static CellScanner getSizedCellScanner(final Cell[] cells) { + static ExtendedCellScanner getSizedCellScanner(final ExtendedCell[] cells) { int size = -1; for (Cell cell : cells) { size += PrivateCellUtil.estimatedSerializedSizeOf(cell); } final int totalSize = ClassSize.align(size); - final CellScanner cellScanner = CellUtil.createCellScanner(cells); - return new SizedCellScanner() { + final ExtendedCellScanner cellScanner = PrivateCellUtil.createExtendedCellScanner(cells); + return new SizedExtendedCellScanner() { @Override public long heapSize() { return totalSize; } @Override - public Cell current() { + public ExtendedCell current() { return cellScanner.current(); } @@ -117,12 +115,12 @@ public boolean advance() throws IOException { }; } - static Cell[] getCells(final int howMany) { + static ExtendedCell[] getCells(final int howMany) { return getCells(howMany, 1024); } - static Cell[] getCells(final int howMany, final int valueSize) { - Cell[] cells = new Cell[howMany]; + static ExtendedCell[] getCells(final int howMany, final int valueSize) { + ExtendedCell[] cells = new ExtendedCell[howMany]; byte[] value = new byte[valueSize]; for (int i = 0; i < howMany; i++) { byte[] index = Bytes.toBytes(i); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index a0b68646b145..a1ef458b3b48 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -24,8 +24,10 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -43,7 +45,7 @@ public class TestHBaseRpcControllerImpl { @Test public void testListOfCellScannerables() throws IOException { final int count = 10; - List cells = new ArrayList<>(count); + List cells = new ArrayList<>(count); for (int i = 0; i < count; i++) { cells.add(createCell(i)); @@ -64,16 +66,16 @@ public void testListOfCellScannerables() throws IOException { * @param index the index of the cell to use as its value * @return A faked out 'Cell' that does nothing but return index as its value */ - static CellScannable createCell(final int index) { - return new CellScannable() { + static ExtendedCellScannable createCell(final int index) { + return new ExtendedCellScannable() { @Override - public CellScanner cellScanner() { - return new CellScanner() { + public ExtendedCellScanner cellScanner() { + return new ExtendedCellScanner() { @Override - public Cell current() { + public ExtendedCell current() { // Fake out a Cell. 
All this Cell has is a value that is an int in size and equal // to the above 'index' param serialized as an int. - return new Cell() { + return new ExtendedCell() { @Override public long heapSize() { return 0; @@ -180,6 +182,18 @@ public byte[] getTagsArray() { public Type getType() { return null; } + + @Override + public void setSequenceId(long seqId) throws IOException { + } + + @Override + public void setTimestamp(long ts) throws IOException { + } + + @Override + public void setTimestamp(byte[] ts) throws IOException { + } }; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 2b4380dfbb6d..acc561812853 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -505,7 +506,7 @@ public void testCellConversionWithTags() { CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); assertNotNull(protoCell); - Cell decodedCell = getCellFromProtoResult(protoCell, true); + ExtendedCell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); assertEquals(1, decodedTags.size()); Tag decodedTag = decodedTags.get(0); @@ -525,7 +526,7 @@ private Cell getCellWithTags() { return cellBuilder.build(); } - private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { + private ExtendedCell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { ExtendedCellBuilder decodedBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); @@ -542,7 +543,7 @@ public void testCellConversionWithoutTags() { CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); assertNotNull(protoCell); - Cell decodedCell = getCellFromProtoResult(protoCell, false); + ExtendedCell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); assertEquals(0, decodedTags.size()); } @@ -558,7 +559,7 @@ public void testTagEncodeFalseDecodeTrue() { CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); assertNotNull(protoCell); - Cell decodedCell = getCellFromProtoResult(protoCell, true); + ExtendedCell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); assertEquals(0, decodedTags.size()); } @@ -574,7 +575,7 @@ public void testTagEncodeTrueDecodeFalse() { CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); assertNotNull(protoCell); - Cell decodedCell = getCellFromProtoResult(protoCell, false); + ExtendedCell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); assertEquals(0, decodedTags.size()); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java index 749f6b46782c..45e6f2d11d45 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java @@ -328,10 +328,10 @@ public ExtendedCell deepClone() { */ @Override public boolean equals(Object other) { - if (!(other instanceof Cell)) { + if (!(other instanceof ExtendedCell)) { return false; } - return CellUtil.equals(this, (Cell) other); + return PrivateCellUtil.equals(this, (ExtendedCell) other); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index a94c3679f478..0cd0905cc3a6 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -761,7 +761,7 @@ public int compareTimestamps(final long ltimestamp, final long rtimestamp) { } @Override - public Comparator getSimpleComparator() { + public Comparator getSimpleComparator() { return this; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 85f23550efc7..10213b143632 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -309,10 +309,14 @@ public Cell current() { public boolean advance() throws IOException { while (true) { if (this.cellScanner == null) { - if (!this.iterator.hasNext()) return false; + if (!this.iterator.hasNext()) { + return false; + } this.cellScanner = this.iterator.next().cellScanner(); } - if (this.cellScanner.advance()) return true; + if (this.cellScanner.advance()) { + return true; + } this.cellScanner = null; } } @@ -357,13 +361,17 @@ public static CellScanner createCellScanner(final Cell[] cellArray) { @Override public Cell current() { - if (cells == null) return null; + if (cells == null) { + return null; + } return (index < 0) ? null : this.cells[index]; } @Override public boolean advance() { - if (cells == null) return false; + if (cells == null) { + return false; + } return ++index < this.cells.length; } }; @@ -549,8 +557,13 @@ public static boolean matchingValue(final Cell left, final byte[] buf) { buf.length); } + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. Tags are now internal only, you should not + * try to check it through the {@link Cell} interface. 
+ */ + @Deprecated public static boolean matchingTags(final Cell left, final Cell right) { - return PrivateCellUtil.matchingTags(left, right, left.getTagsLength(), right.getTagsLength()); + return PrivateCellUtil.matchingTags((ExtendedCell) left, (ExtendedCell) right); } /** @@ -662,7 +675,7 @@ public static String toString(Cell cell, boolean verbose) { public static boolean equals(Cell a, Cell b) { return matchingRows(a, b) && matchingFamily(a, b) && matchingQualifier(a, b) - && matchingTimestamp(a, b) && PrivateCellUtil.matchingType(a, b); + && matchingTimestamp(a, b) && a.getTypeByte() == b.getTypeByte(); } public static boolean matchingTimestamp(Cell a, Cell b) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScannable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScannable.java new file mode 100644 index 000000000000..c3772f166221 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScannable.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * We use this class in HBase internally for getting {@link ExtendedCell} directly without casting. + *

    + * In general, all {@link Cell}s in HBase should and must be {@link ExtendedCell}. + *

    + * See HBASE-28684 and related issues for more details. + * @see CellScannable + * @see ExtendedCellScanner + * @see ExtendedCell + */ +@InterfaceAudience.Private +public interface ExtendedCellScannable extends CellScannable { + + @Override + ExtendedCellScanner cellScanner(); +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScanner.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScanner.java new file mode 100644 index 000000000000..f7011291d9ce --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellScanner.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * We use this class in HBase internally for getting {@link ExtendedCell} directly without casting. + *

    + * In general, all {@link Cell}s in HBase should and must be {@link ExtendedCell}. + *

    + * See HBASE-28684 and related issues for more details. + * @see CellScanner + * @see ExtendedCellScannable + * @see ExtendedCell + */ +@InterfaceAudience.Private +public interface ExtendedCellScanner extends CellScanner { + + @Override + ExtendedCell current(); +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 89e91ca80361..a87a5214fadf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -978,10 +978,10 @@ private static byte[] createByteArray(final byte[] row, final int roffset, final */ @Override public boolean equals(Object other) { - if (!(other instanceof Cell)) { + if (!(other instanceof ExtendedCell)) { return false; } - return CellUtil.equals(this, (Cell) other); + return PrivateCellUtil.equals(this, (ExtendedCell) other); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java index ed3687e9ed4d..eacd819a23ee 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java @@ -63,9 +63,13 @@ public static boolean containsIgnoreMvccVersion(Collection kvCol for (Cell kv1 : kvCollection1) { boolean found = false; for (Cell kv2 : kvCollection2) { - if (PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true; + if (PrivateCellUtil.equalsIgnoreMvccVersion((ExtendedCell) kv1, (ExtendedCell) kv2)) { + found = true; + } + } + if (!found) { + return false; } - if (!found) return false; } return true; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 4b61688abc28..6c8f2e6e4edb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -88,7 +88,7 @@ public static int totalLengthWithMvccVersion(final Iterable /**************** copy the cell to create a new keyvalue *********************/ - public static KeyValue copyToNewKeyValue(final Cell cell) { + public static KeyValue copyToNewKeyValue(final ExtendedCell cell) { byte[] bytes = copyToNewByteArray(cell); KeyValue kvCell = new KeyValue(bytes, 0, bytes.length); kvCell.setSequenceId(cell.getSequenceId()); @@ -120,7 +120,7 @@ public static KeyValue toNewKeyCell(final ExtendedCell cell) { return kv; } - public static byte[] copyToNewByteArray(final Cell cell) { + public static byte[] copyToNewByteArray(final ExtendedCell cell) { // Cell#getSerializedSize returns the serialized size of the Source cell, which may // not serialize all fields. We are constructing a KeyValue backing array here, // which does include all fields, and must allocate accordingly. 
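The comment above in copyToNewByteArray explains why the backing array cannot be sized from Cell#getSerializedSize alone: the KeyValue flat layout always stores every field. As a rough worked sketch of that arithmetic under the usual KeyValue layout (4-byte key length, 4-byte value length, then row/family/qualifier/timestamp/type, the value, and an optional 2-byte-prefixed tags block), the constants and method names below are defined locally for illustration and are not the actual KeyValue helpers.

    public class KeyValueLengthSketch {

      // 4-byte key length + 4-byte value length prefix.
      static final int LENGTH_PREFIXES = 2 * Integer.BYTES;
      // Key = 2-byte row length + row + 1-byte family length + family + qualifier
      //       + 8-byte timestamp + 1-byte type.
      static final int KEY_FIXED_OVERHEAD = Short.BYTES + Byte.BYTES + Long.BYTES + Byte.BYTES;
      // Optional trailing tags block: 2-byte tags length + tags.
      static final int TAGS_LENGTH_SIZE = Short.BYTES;

      static int keyLength(int rowLen, int familyLen, int qualifierLen) {
        return KEY_FIXED_OVERHEAD + rowLen + familyLen + qualifierLen;
      }

      static int serializedLength(int rowLen, int familyLen, int qualifierLen, int valueLen,
          int tagsLen, boolean withTags) {
        int len = LENGTH_PREFIXES + keyLength(rowLen, familyLen, qualifierLen) + valueLen;
        if (withTags && tagsLen > 0) {
          len += TAGS_LENGTH_SIZE + tagsLen;
        }
        return len;
      }

      public static void main(String[] args) {
        // e.g. row=3, family=1, qualifier=4, value=10, tags=6 bytes -> 46
        System.out.println(serializedLength(3, 1, 4, 10, 6, true));
      }
    }
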
@@ -133,7 +133,7 @@ public static byte[] copyToNewByteArray(final Cell cell) { return backingBytes; } - public static int appendKeyTo(final Cell cell, final byte[] output, final int offset) { + public static int appendKeyTo(final ExtendedCell cell, final byte[] output, final int offset) { int nextOffset = offset; nextOffset = Bytes.putShort(output, nextOffset, cell.getRowLength()); nextOffset = CellUtil.copyRowTo(cell, output, nextOffset); @@ -147,7 +147,8 @@ public static int appendKeyTo(final Cell cell, final byte[] output, final int of /**************** copy key and value *********************/ - public static int appendToByteArray(Cell cell, byte[] output, int offset, boolean withTags) { + public static int appendToByteArray(ExtendedCell cell, byte[] output, int offset, + boolean withTags) { int pos = offset; pos = Bytes.putInt(output, pos, keyLength(cell)); pos = Bytes.putInt(output, pos, cell.getValueLength()); @@ -416,7 +417,7 @@ public static KeyValue createFirstOnRow(byte[] buffer, final int boffset, final * @deprecated without any replacement. */ @Deprecated - public static KeyValue ensureKeyValue(final Cell cell) { + public static KeyValue ensureKeyValue(final ExtendedCell cell) { if (cell == null) return null; if (cell instanceof KeyValue) { if (cell.getClass().getName().equals(KeyValue.class.getName())) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index 9013f9a9f26c..bf514d81c5a2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -29,6 +29,8 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map.Entry; +import java.util.NavigableMap; import java.util.Optional; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.io.TagCompressionContext; @@ -40,6 +42,8 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** * Utility methods helpful slinging {@link Cell} instances. It has more powerful and rich set of * APIs than those in {@link CellUtil} for internal usage. @@ -72,7 +76,7 @@ public static ByteRange fillValueRange(Cell cell, ByteRange range) { return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } - public static ByteRange fillTagRange(Cell cell, ByteRange range) { + public static ByteRange fillTagRange(ExtendedCell cell, ByteRange range) { return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); } @@ -582,8 +586,8 @@ public int write(OutputStream out, boolean withTags) throws IOException { * Made into a static method so as to reuse the logic within * ValueAndTagRewriteByteBufferExtendedCell */ - static int write(OutputStream out, boolean withTags, Cell cell, byte[] value, byte[] tags) - throws IOException { + static int write(OutputStream out, boolean withTags, ExtendedCell cell, byte[] value, + byte[] tags) throws IOException { int valLen = value == null ? 
0 : value.length; ByteBufferUtils.putInt(out, KeyValueUtil.keyLength(cell));// Key length ByteBufferUtils.putInt(out, valLen);// Value length @@ -802,11 +806,16 @@ public static boolean matchingValue(final Cell left, final Cell right, int lvlen right.getValueArray(), right.getValueOffset(), rvlength); } - public static boolean matchingType(Cell a, Cell b) { + public static boolean matchingType(ExtendedCell a, ExtendedCell b) { return a.getTypeByte() == b.getTypeByte(); } - public static boolean matchingTags(final Cell left, final Cell right, int llength, int rlength) { + public static boolean matchingTags(final ExtendedCell left, final ExtendedCell right) { + return matchingTags(left, right, left.getTagsLength(), right.getTagsLength()); + } + + public static boolean matchingTags(final ExtendedCell left, final ExtendedCell right, int llength, + int rlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { ByteBufferExtendedCell leftBBCell = (ByteBufferExtendedCell) left; ByteBufferExtendedCell rightBBCell = (ByteBufferExtendedCell) right; @@ -840,7 +849,7 @@ public static boolean isDeleteType(ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.Delete.getCode(); } - public static boolean isDeleteFamily(final Cell cell) { + public static boolean isDeleteFamily(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteFamily.getCode(); } @@ -862,14 +871,14 @@ public static boolean isDeleteColumnOrFamily(ExtendedCell cell) { return t == KeyValue.Type.DeleteColumn.getCode() || t == KeyValue.Type.DeleteFamily.getCode(); } - public static byte[] cloneTags(Cell cell) { + public static byte[] cloneTags(ExtendedCell cell) { byte[] output = new byte[cell.getTagsLength()]; copyTagsTo(cell, output, 0); return output; } /** Copies the tags info into the tag portion of the cell */ - public static int copyTagsTo(Cell cell, byte[] destination, int destinationOffset) { + public static int copyTagsTo(ExtendedCell cell, byte[] destination, int destinationOffset) { int tlen = cell.getTagsLength(); if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyFromBufferToArray(destination, @@ -883,7 +892,7 @@ public static int copyTagsTo(Cell cell, byte[] destination, int destinationOffse } /** Copies the tags info into the tag portion of the cell */ - public static int copyTagsTo(Cell cell, ByteBuffer destination, int destinationOffset) { + public static int copyTagsTo(ExtendedCell cell, ByteBuffer destination, int destinationOffset) { int tlen = cell.getTagsLength(); if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getTagsByteBuffer(), @@ -900,7 +909,7 @@ public static int copyTagsTo(Cell cell, ByteBuffer destination, int destinationO * @param cell The Cell * @return Tags in the given Cell as a List */ - public static List getTags(Cell cell) { + public static List getTags(ExtendedCell cell) { List tags = new ArrayList<>(); Iterator tagsItr = tagsIterator(cell); while (tagsItr.hasNext()) { @@ -915,7 +924,7 @@ public static List getTags(Cell cell) { * @param type Type of the Tag to retrieve * @return Optional, empty if there is no tag of the passed in tag type */ - public static Optional getTag(Cell cell, byte type) { + public static Optional getTag(ExtendedCell cell, byte type) { boolean bufferBacked = cell instanceof ByteBufferExtendedCell; int length = cell.getTagsLength(); int offset = @@ -946,7 +955,7 @@ public static Optional getTag(Cell cell, byte type) { * @param 
cell The Cell over which tags iterator is needed. * @return iterator for the tags */ - public static Iterator tagsIterator(final Cell cell) { + public static Iterator tagsIterator(final ExtendedCell cell) { final int tagsLength = cell.getTagsLength(); // Save an object allocation where we can if (tagsLength == 0) { @@ -1069,8 +1078,8 @@ public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short * @param withTsType when true check timestamp and type bytes also. * @return length of common prefix */ - public static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck, - boolean withTsType) { + public static int findCommonPrefixInFlatKey(ExtendedCell c1, ExtendedCell c2, + boolean bypassFamilyCheck, boolean withTsType) { // Compare the 2 bytes in RK length part short rLen1 = c1.getRowLength(); short rLen2 = c2.getRowLength(); @@ -2196,7 +2205,7 @@ public Type getType() { * rk len><rk><1 byte cf len><cf><qualifier><8 bytes * timestamp><1 byte type> */ - public static void writeFlatKey(Cell cell, DataOutput out) throws IOException { + public static void writeFlatKey(ExtendedCell cell, DataOutput out) throws IOException { short rowLen = cell.getRowLength(); byte fLen = cell.getFamilyLength(); int qLen = cell.getQualifierLength(); @@ -2223,68 +2232,7 @@ public static void writeFlatKey(Cell cell, DataOutput out) throws IOException { out.writeByte(cell.getTypeByte()); } - /** - * Deep clones the given cell if the cell supports deep cloning - * @param cell the cell to be cloned - * @return the cloned cell - */ - public static Cell deepClone(Cell cell) throws CloneNotSupportedException { - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).deepClone(); - } - throw new CloneNotSupportedException(); - } - - /** - * Writes the cell to the given OutputStream - * @param cell the cell to be written - * @param out the outputstream - * @param withTags if tags are to be written or not - * @return the total bytes written - */ - public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException { - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).write(out, withTags); - } else { - ByteBufferUtils.putInt(out, estimatedSerializedSizeOfKey(cell)); - ByteBufferUtils.putInt(out, cell.getValueLength()); - writeFlatKey(cell, out); - writeValue(out, cell, cell.getValueLength()); - int tagsLength = cell.getTagsLength(); - if (withTags) { - byte[] len = new byte[Bytes.SIZEOF_SHORT]; - Bytes.putAsShort(len, 0, tagsLength); - out.write(len); - if (tagsLength > 0) { - writeTags(out, cell, tagsLength); - } - } - int lenWritten = - (2 * Bytes.SIZEOF_INT) + estimatedSerializedSizeOfKey(cell) + cell.getValueLength(); - if (withTags) { - lenWritten += Bytes.SIZEOF_SHORT + tagsLength; - } - return lenWritten; - } - } - - /** - * Writes a cell to the buffer at the given offset - * @param cell the cell to be written - * @param buf the buffer to which the cell has to be wrriten - * @param offset the offset at which the cell should be written - */ - public static void writeCellToBuffer(Cell cell, ByteBuffer buf, int offset) { - if (cell instanceof ExtendedCell) { - ((ExtendedCell) cell).write(buf, offset); - } else { - // Using the KVUtil - byte[] bytes = KeyValueUtil.copyToNewByteArray(cell); - ByteBufferUtils.copyFromArrayToBuffer(buf, offset, bytes, 0, bytes.length); - } - } - - public static int writeFlatKey(Cell cell, OutputStream out) throws IOException { + public static int writeFlatKey(ExtendedCell cell, OutputStream out) 
throws IOException { short rowLen = cell.getRowLength(); byte fLen = cell.getFamilyLength(); int qLen = cell.getQualifierLength(); @@ -2359,9 +2307,9 @@ public static void setTimestamp(Cell cell, byte[] ts) throws IOException { * @return True if cell timestamp is modified. * @throws IOException when the passed cell is not of type {@link ExtendedCell} */ - public static boolean updateLatestStamp(Cell cell, long ts) throws IOException { + public static boolean updateLatestStamp(ExtendedCell cell, long ts) throws IOException { if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) { - setTimestamp(cell, ts); + cell.setTimestamp(ts); return true; } return false; @@ -2452,7 +2400,8 @@ public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, * @param cell The cell whose contents has to be written * @param vlength the value length */ - public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException { + public static void writeValue(OutputStream out, ExtendedCell cell, int vlength) + throws IOException { if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getValueByteBuffer(), ((ByteBufferExtendedCell) cell).getValuePosition(), vlength); @@ -2467,7 +2416,8 @@ public static void writeValue(OutputStream out, Cell cell, int vlength) throws I * @param cell The cell whose contents has to be written * @param tagsLength the tag length */ - public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException { + public static void writeTags(OutputStream out, ExtendedCell cell, int tagsLength) + throws IOException { if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getTagsByteBuffer(), ((ByteBufferExtendedCell) cell).getTagsPosition(), tagsLength); @@ -2479,22 +2429,31 @@ public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws /** * special case for Cell.equals */ - public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) { + public static boolean equalsIgnoreMvccVersion(ExtendedCell a, ExtendedCell b) { // row boolean res = CellUtil.matchingRows(a, b); - if (!res) return res; + if (!res) { + return res; + } // family res = CellUtil.matchingColumn(a, b); - if (!res) return res; + if (!res) { + return res; + } // timestamp: later sorts first - if (!CellUtil.matchingTimestamp(a, b)) return false; + if (!CellUtil.matchingTimestamp(a, b)) { + return false; + } // type int c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); - if (c != 0) return false; - else return true; + if (c != 0) { + return false; + } else { + return true; + } } /** @@ -2619,8 +2578,8 @@ public static void compressQualifier(OutputStream out, Cell cell, Dictionary dic * @return an int greater than 0 if left is greater than right lesser than 0 if left is lesser * than right equal to 0 if left is equal to right */ - public static final int compare(CellComparator comparator, Cell left, byte[] key, int offset, - int length) { + public static final int compare(CellComparator comparator, ExtendedCell left, byte[] key, + int offset, int length) { // row short rrowlength = Bytes.toShort(key, offset); int c = comparator.compareRows(left, key, offset + Bytes.SIZEOF_SHORT, rrowlength); @@ -2646,7 +2605,7 @@ public static final int compare(CellComparator comparator, Cell left, byte[] key * @return greater than 0 if left cell is bigger, less than 0 if right cell is bigger, 0 if both * cells are equal */ - static 
final int compareWithoutRow(CellComparator comparator, Cell left, byte[] right, + static final int compareWithoutRow(CellComparator comparator, ExtendedCell left, byte[] right, int roffset, int rlength, short rowlength) { /*** * KeyValue Format and commonLength: @@ -2954,4 +2913,154 @@ public static ExtendedCell createLastOnRowCol(final Cell cell) { public static ExtendedCell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) { return new FirstOnRowDeleteFamilyCell(row, fam); } + + /** + * In fact, in HBase, all {@link Cell}s are {@link ExtendedCell}s. We do not expect users to + * implement their own {@link Cell} types, except some special projects like Phoenix, where they + * just use {@link org.apache.hadoop.hbase.KeyValue} and {@link ExtendedCell} directly. + * @return the original {@code cell} which has already been cast to an {@link ExtendedCell}. + * @throws IllegalArgumentException if the given {@code cell} is not an {@link ExtendedCell}. + */ + public static ExtendedCell ensureExtendedCell(Cell cell) { + if (cell == null) { + return null; + } + Preconditions.checkArgument(cell instanceof ExtendedCell, "Unsupported cell type: %s", + cell.getClass().getName()); + return (ExtendedCell) cell; + } + + public static boolean equals(ExtendedCell a, ExtendedCell b) { + return CellUtil.matchingRows(a, b) && CellUtil.matchingFamily(a, b) + && CellUtil.matchingQualifier(a, b) && CellUtil.matchingTimestamp(a, b) + && PrivateCellUtil.matchingType(a, b); + } + + /** Returns ExtendedCellScanner interface over cellIterables */ + public static ExtendedCellScanner + createExtendedCellScanner(final List cellScannerables) { + return new ExtendedCellScanner() { + private final Iterator iterator = + cellScannerables.iterator(); + private ExtendedCellScanner cellScanner = null; + + @Override + public ExtendedCell current() { + return this.cellScanner != null ? this.cellScanner.current() : null; + } + + @Override + public boolean advance() throws IOException { + while (true) { + if (this.cellScanner == null) { + if (!this.iterator.hasNext()) { + return false; + } + this.cellScanner = this.iterator.next().cellScanner(); + } + if (this.cellScanner.advance()) { + return true; + } + this.cellScanner = null; + } + } + }; + } + + /** + * Flatten the map of cells out under the ExtendedCellScanner + * @param map Map of Cell Lists; for example, the map of families to ExtendedCells that is used + * inside Put, etc., keeping Cells organized by family. 
+ * @return ExtendedCellScanner interface over cellIterable + */ + public static ExtendedCellScanner + createExtendedCellScanner(final NavigableMap> map) { + return new ExtendedCellScanner() { + private final Iterator>> entries = map.entrySet().iterator(); + private Iterator currentIterator = null; + private ExtendedCell currentCell; + + @Override + public ExtendedCell current() { + return this.currentCell; + } + + @Override + public boolean advance() { + while (true) { + if (this.currentIterator == null) { + if (!this.entries.hasNext()) { + return false; + } + this.currentIterator = this.entries.next().getValue().iterator(); + } + if (this.currentIterator.hasNext()) { + this.currentCell = this.currentIterator.next(); + return true; + } + this.currentCell = null; + this.currentIterator = null; + } + } + }; + } + + /** Returns CellScanner interface over cellArray */ + public static ExtendedCellScanner createExtendedCellScanner(final ExtendedCell[] cellArray) { + return new ExtendedCellScanner() { + private final ExtendedCell[] cells = cellArray; + private int index = -1; + + @Override + public ExtendedCell current() { + if (cells == null) { + return null; + } + return (index < 0) ? null : this.cells[index]; + } + + @Override + public boolean advance() { + if (cells == null) { + return false; + } + return ++index < this.cells.length; + } + }; + } + + /** Returns ExtendedCellScanner interface over cellIterable */ + public static ExtendedCellScanner + createExtendedCellScanner(final Iterable cellIterable) { + if (cellIterable == null) { + return null; + } + return createExtendedCellScanner(cellIterable.iterator()); + } + + /** + * Returns ExtendedCellScanner interface over cellIterable or null if + * cells is null + */ + public static ExtendedCellScanner createExtendedCellScanner(final Iterator cells) { + if (cells == null) { + return null; + } + return new ExtendedCellScanner() { + private final Iterator iterator = cells; + private ExtendedCell current = null; + + @Override + public ExtendedCell current() { + return this.current; + } + + @Override + public boolean advance() { + boolean hasNext = this.iterator.hasNext(); + this.current = hasNext ? this.iterator.next() : null; + return hasNext; + } + }; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java index 6d911bccd58a..cc15fa7ba752 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java @@ -66,12 +66,12 @@ public static Pair readVIntValuePart(Tag tag, int offset) thro } /** Returns A List<Tag> of any Tags found in cell else null. */ - public static List carryForwardTags(final Cell cell) { + public static List carryForwardTags(final ExtendedCell cell) { return carryForwardTags(null, cell); } /** Add to tagsOrNull any Tags cell is carrying or null if none. */ - public static List carryForwardTags(final List tagsOrNull, final Cell cell) { + public static List carryForwardTags(final List tagsOrNull, final ExtendedCell cell) { Iterator itr = PrivateCellUtil.tagsIterator(cell); if (itr == EMPTY_TAGS_ITR) { // If no Tags, return early. @@ -87,7 +87,7 @@ public static List carryForwardTags(final List tagsOrNull, final Cell return tags; } - public static byte[] concatTags(byte[] tags, Cell cell) { + public static byte[] concatTags(byte[] tags, ExtendedCell cell) { int cellTagsLen = cell.getTagsLength(); if (cellTagsLen == 0) { // If no Tags, return early. 
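The ensureExtendedCell(Cell) helper added to PrivateCellUtil above is the bridge that call sites in this patch use to narrow the public Cell type before serializing or mutating a cell. Below is a minimal usage sketch of that pattern; the class name and the row/family/qualifier values are hypothetical, and it assumes an HBase build that already contains this change. PrivateCellUtil is an InterfaceAudience.Private class, so the snippet is illustrative only, not a supported public API.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class EnsureExtendedCellSketch {
  public static void main(String[] args) throws IOException {
    // KeyValue is the common concrete Cell implementation and is already an ExtendedCell.
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("q"),
      1234L, Bytes.toBytes("value"));

    // Narrow the public Cell type to ExtendedCell. A foreign Cell implementation is
    // rejected with IllegalArgumentException instead of being silently mis-handled,
    // the same guard CellSerialization applies with its instanceof check.
    ExtendedCell extended = PrivateCellUtil.ensureExtendedCell(cell);

    // With an ExtendedCell in hand, the cell can be written out directly, which is the
    // pattern the reducers and serializations touched by this patch now rely on.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    int written = extended.write(out, true); // true = include tags
    System.out.println("serialized " + written + " bytes");
  }
}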
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java index 9a2a29356b14..8f4692ee778f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java @@ -22,21 +22,21 @@ import java.io.IOException; import java.io.InputStream; import java.io.PushbackInputStream; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * TODO javadoc + * Base implementation for {@link Codec.Decoder}. */ @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public abstract class BaseDecoder implements Codec.Decoder { protected static final Logger LOG = LoggerFactory.getLogger(BaseDecoder.class); protected final InputStream in; - private Cell current = null; + private ExtendedCell current = null; protected static class PBIS extends PushbackInputStream { public PBIS(InputStream in, int size) { @@ -98,10 +98,10 @@ protected InputStream getInputStream() { * thrown if EOF is reached prematurely. Does not return null. */ @NonNull - protected abstract Cell parseCell() throws IOException; + protected abstract ExtendedCell parseCell() throws IOException; @Override - public Cell current() { + public ExtendedCell current() { return this.current; } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java index 3ca5d2462de3..4875fcd227c9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.io.OutputStream; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -42,7 +42,7 @@ protected OutputStream getOuputStream() { } @Override - public abstract void write(Cell cell) throws IOException; + public abstract void write(ExtendedCell cell) throws IOException; protected void checkFlushed() throws CodecException { if (this.flushed) throw new CodecException("Flushed; done"); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java index f4552c038267..356513be7f75 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java @@ -21,8 +21,8 @@ import java.io.InputStream; import java.io.OutputStream; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -43,7 +43,7 @@ static class CellEncoder extends BaseEncoder { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Row write(cell.getRowArray(), 
cell.getRowOffset(), cell.getRowLength()); @@ -80,7 +80,7 @@ public CellDecoder(final InputStream in) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { byte[] row = readByteArray(this.in); byte[] family = readByteArray(in); byte[] qualifier = readByteArray(in); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java index 07bfb53d5df7..6a8e7e944439 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java @@ -21,8 +21,8 @@ import java.io.InputStream; import java.io.OutputStream; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -44,7 +44,7 @@ static class CellEncoder extends BaseEncoder { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Row write(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); @@ -82,7 +82,7 @@ public CellDecoder(final InputStream in) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { byte[] row = readByteArray(this.in); byte[] family = readByteArray(in); byte[] qualifier = readByteArray(in); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java index df8a94f5614b..18c8b8ea0ab8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java @@ -19,7 +19,7 @@ import java.io.InputStream; import java.io.OutputStream; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.CellOutputStream; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -46,7 +46,7 @@ interface Encoder extends CellOutputStream { * Implementations should implicitly clean up any resources allocated when the Decoder/CellScanner * runs off the end of the cell block. Do this rather than require the user call close explicitly. 
*/ - interface Decoder extends CellScanner { + interface Decoder extends ExtendedCellScanner { } Decoder getDecoder(InputStream is); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java index 9913eac3615c..ef40b395b7b1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java @@ -21,7 +21,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; @@ -55,7 +55,7 @@ public KeyValueEncoder(final OutputStream out) { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Do not write tags over RPC ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, false)); @@ -69,7 +69,7 @@ public KeyValueDecoder(final InputStream in) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { // No tags here return KeyValueUtil.createKeyValueFromInputStream(in, false); } @@ -78,7 +78,7 @@ protected Cell parseCell() throws IOException { public static class ByteBuffKeyValueDecoder implements Codec.Decoder { protected final ByteBuff buf; - protected Cell current = null; + protected ExtendedCell current = null; public ByteBuffKeyValueDecoder(ByteBuff buf) { this.buf = buf; @@ -101,15 +101,15 @@ public boolean advance() throws IOException { } @Override - public Cell current() { + public ExtendedCell current() { return this.current; } - protected Cell createCell(byte[] buf, int offset, int len) { + protected ExtendedCell createCell(byte[] buf, int offset, int len) { return new NoTagsKeyValue(buf, offset, len); } - protected Cell createCell(ByteBuffer bb, int pos, int len) { + protected ExtendedCell createCell(ByteBuffer bb, int pos, int len) { // We know there is not going to be any tags. 
return new NoTagsByteBufferKeyValue(bb, pos, len); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java index 8c2c20625dfb..655bc4c5f261 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java @@ -22,7 +22,7 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -58,7 +58,7 @@ public KeyValueEncoder(final OutputStream out) { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Write tags ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, true)); @@ -72,7 +72,7 @@ public KeyValueDecoder(final InputStream in) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { // create KeyValue with tags return KeyValueUtil.createKeyValueFromInputStream(in, true); } @@ -85,12 +85,12 @@ public ByteBuffKeyValueDecoder(ByteBuff buf) { } @Override - protected Cell createCell(byte[] buf, int offset, int len) { + protected ExtendedCell createCell(byte[] buf, int offset, int len) { return new KeyValue(buf, offset, len); } @Override - protected Cell createCell(ByteBuffer bb, int pos, int len) { + protected ExtendedCell createCell(ByteBuffer bb, int pos, int len) { return new ByteBufferKeyValue(bb, pos, len); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java index d1310137e8ce..8236dceca913 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.io; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -40,7 +40,7 @@ public interface CellOutputStream { * the copy of the Cell that was added in the write. * @param cell Cell to write out */ - void write(Cell cell) throws IOException; + void write(ExtendedCell cell) throws IOException; /** * Let the implementation decide what to do. 
Usually means writing accumulated data into a byte[] diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedCellScanner.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedExtendedCellScanner.java similarity index 85% rename from hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedCellScanner.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedExtendedCellScanner.java index 379dfca051a3..8f31ba8fc832 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedCellScanner.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/SizedExtendedCellScanner.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hbase.io; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; /** * A CellScanner that knows its size in memory in bytes. Used playing the CellScanner into an * in-memory buffer; knowing the size ahead of time saves on background buffer resizings. */ @InterfaceAudience.Private -@InterfaceStability.Unstable -public interface SizedCellScanner extends CellScanner, HeapSize { +public interface SizedExtendedCellScanner extends ExtendedCellScanner, HeapSize { } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java index 68b300ae60fe..31724723bcd4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java @@ -29,7 +29,7 @@ import java.util.List; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -93,7 +93,7 @@ public EncodedDataBlock(Configuration conf, DataBlockEncoder dataBlockEncoder, * @param headerSize header size of the block. * @return Forwards sequential iterator. 
*/ - public Iterator getIterator(int headerSize) { + public Iterator getIterator(int headerSize) { final int rawSize = rawKVs.length; byte[] encodedDataWithHeader = getEncodedData(); int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT; @@ -101,7 +101,7 @@ public Iterator getIterator(int headerSize) { encodedDataWithHeader.length - bytesToSkip); final DataInputStream dis = new DataInputStream(bais); - return new Iterator() { + return new Iterator() { private ByteBuffer decompressedData = null; private Iterator it = isTagsLenZero.iterator(); @@ -114,7 +114,7 @@ public boolean hasNext() { } @Override - public Cell next() { + public ExtendedCell next() { if (decompressedData == null) { try { decompressedData = dataBlockEncoder.decodeKeyValues(dis, diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java index 2c48e5b7d7b8..7fb4fd9685e9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java @@ -19,7 +19,7 @@ import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -37,7 +37,7 @@ public NoneEncoder(DataOutputStream out, HFileBlockDefaultEncodingContext encodi this.encodingCtx = encodingCtx; } - public int write(Cell cell) throws IOException { + public int write(ExtendedCell cell) throws IOException { // We write tags seperately because though there is no tag in KV // if the hfilecontext says include tags we need the tags length to be // written diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java index c4b24728b427..553b39311369 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java @@ -76,7 +76,7 @@ public void testCompareCells() { kv1 = new KeyValue(row1, fam1, qual1, 1L, KeyValue.Type.Put); kv2 = new KeyValue(row1, fam1, qual1, 1L, KeyValue.Type.Put); - assertTrue(CellUtil.equals(kv1, kv2)); + assertTrue(PrivateCellUtil.equals(kv1, kv2)); } @Test diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java index dcd796fa3d4f..3ff63ef6ff65 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.math.BigDecimal; import java.nio.ByteBuffer; @@ -92,7 +91,7 @@ public boolean advance() { /** * Cell used in test. Has row only. 
*/ - private static class TestCell implements Cell { + private static class TestCell implements ExtendedCell { private final byte[] row; TestCell(final int i) { @@ -116,67 +115,56 @@ public short getRowLength() { @Override public byte[] getFamilyArray() { - // TODO Auto-generated method stub return null; } @Override public int getFamilyOffset() { - // TODO Auto-generated method stub return 0; } @Override public byte getFamilyLength() { - // TODO Auto-generated method stub return 0; } @Override public byte[] getQualifierArray() { - // TODO Auto-generated method stub return null; } @Override public int getQualifierOffset() { - // TODO Auto-generated method stub return 0; } @Override public int getQualifierLength() { - // TODO Auto-generated method stub return 0; } @Override public long getTimestamp() { - // TODO Auto-generated method stub return 0; } @Override public byte getTypeByte() { - // TODO Auto-generated method stub return 0; } @Override public byte[] getValueArray() { - // TODO Auto-generated method stub return null; } @Override public int getValueOffset() { - // TODO Auto-generated method stub return 0; } @Override public int getValueLength() { - // TODO Auto-generated method stub return 0; } @@ -187,25 +175,21 @@ public int getSerializedSize() { @Override public byte[] getTagsArray() { - // TODO Auto-generated method stub return null; } @Override public int getTagsOffset() { - // TODO Auto-generated method stub return 0; } @Override public long getSequenceId() { - // TODO Auto-generated method stub return 0; } @Override public int getTagsLength() { - // TODO Auto-generated method stub return 0; } @@ -213,6 +197,20 @@ public int getTagsLength() { public long heapSize() { return 0; } + + @Override + public void setSequenceId(long seqId) throws IOException { + + } + + @Override + public void setTimestamp(long ts) throws IOException { + + } + + @Override + public void setTimestamp(byte[] ts) throws IOException { + } } /** @@ -444,7 +442,7 @@ public void testCloneCellFieldsFromByteBufferedCell() { KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, KeyValue.Type.Put, v, 0, v.length, tags); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); - Cell bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); + ExtendedCell bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); byte[] rDest = CellUtil.cloneRow(bbCell); assertTrue(Bytes.equals(r, rDest)); byte[] fDest = CellUtil.cloneFamily(bbCell); @@ -519,30 +517,9 @@ public void testCellFieldsAsPrimitiveTypesFromByteBufferedCell() { assertEquals(bd, PrivateCellUtil.getValueAsBigDecimal(bbCell)); } - @Test - public void testWriteCell() throws IOException { - byte[] r = Bytes.toBytes("row1"); - byte[] f = Bytes.toBytes("cf1"); - byte[] q1 = Bytes.toBytes("qual1"); - byte[] v = Bytes.toBytes("val1"); - byte[] tags = Bytes.toBytes("tag1"); - KeyValue kv = - new KeyValue(r, f, q1, 0, q1.length, 1234L, KeyValue.Type.Put, v, 0, v.length, tags); - NonExtendedCell nonExtCell = new NonExtendedCell(kv); - ByteArrayOutputStream os = new ByteArrayOutputStream(); - PrivateCellUtil.writeCell(nonExtCell, os, true); - byte[] byteArray = os.toByteArray(); - KeyValue res = new KeyValue(byteArray); - assertTrue(CellUtil.equals(kv, res)); - } - - // Workaround for jdk 11 - reflective access to interface default methods for testGetType - private static abstract class CellForMockito implements Cell { - } - @Test public void testGetType() { - CellForMockito c = Mockito.mock(CellForMockito.class); + ExtendedCell c = 
Mockito.mock(ExtendedCell.class); Mockito.when(c.getType()).thenCallRealMethod(); for (Cell.Type type : Cell.Type.values()) { Mockito.when(c.getTypeByte()).thenReturn(type.getCode()); @@ -563,112 +540,4 @@ public void testGetType() { } catch (UnsupportedOperationException e) { } } - - private static class NonExtendedCell implements Cell { - private KeyValue kv; - - public NonExtendedCell(KeyValue kv) { - this.kv = kv; - } - - @Override - public byte[] getRowArray() { - return this.kv.getRowArray(); - } - - @Override - public int getRowOffset() { - return this.kv.getRowOffset(); - } - - @Override - public short getRowLength() { - return this.kv.getRowLength(); - } - - @Override - public byte[] getFamilyArray() { - return this.kv.getFamilyArray(); - } - - @Override - public int getFamilyOffset() { - return this.kv.getFamilyOffset(); - } - - @Override - public byte getFamilyLength() { - return this.kv.getFamilyLength(); - } - - @Override - public byte[] getQualifierArray() { - return this.kv.getQualifierArray(); - } - - @Override - public int getQualifierOffset() { - return this.kv.getQualifierOffset(); - } - - @Override - public int getQualifierLength() { - return this.kv.getQualifierLength(); - } - - @Override - public long getTimestamp() { - return this.kv.getTimestamp(); - } - - @Override - public byte getTypeByte() { - return this.kv.getTypeByte(); - } - - @Override - public long getSequenceId() { - return this.kv.getSequenceId(); - } - - @Override - public byte[] getValueArray() { - return this.kv.getValueArray(); - } - - @Override - public int getValueOffset() { - return this.kv.getValueOffset(); - } - - @Override - public int getValueLength() { - return this.kv.getValueLength(); - } - - @Override - public int getSerializedSize() { - return this.kv.getSerializedSize(); - } - - @Override - public byte[] getTagsArray() { - return this.kv.getTagsArray(); - } - - @Override - public int getTagsOffset() { - return this.kv.getTagsOffset(); - } - - @Override - public int getTagsLength() { - return this.kv.getTagsLength(); - } - - @Override - public long heapSize() { - return this.kv.heapSize(); - } - } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java index 9963e37c17f6..3508419cea78 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodec.java @@ -26,10 +26,10 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -113,14 +113,14 @@ public void testThree() throws IOException { DataInputStream dis = new DataInputStream(cis); Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); - Cell c = decoder.current(); - assertTrue(CellUtil.equals(c, kv1)); + ExtendedCell c = decoder.current(); + assertTrue(PrivateCellUtil.equals(c, kv1)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellUtil.equals(c, kv2)); + assertTrue(PrivateCellUtil.equals(c, kv2)); 
assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellUtil.equals(c, kv3)); + assertTrue(PrivateCellUtil.equals(c, kv3)); assertFalse(decoder.advance()); dis.close(); assertEquals(offset, cis.getCount()); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java index b44473ee49e6..17c95eb2c729 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java @@ -28,8 +28,8 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -58,15 +58,15 @@ public void testCellWithTag() throws IOException { DataOutputStream dos = new DataOutputStream(cos); Codec codec = new CellCodecWithTags(); Codec.Encoder encoder = codec.getEncoder(dos); - final Cell cell1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), - HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"), + final ExtendedCell cell1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), + Bytes.toBytes("1"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"), new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring1")), new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring2")) }); - final Cell cell2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), - HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), + final ExtendedCell cell2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), + Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring3")), }); - final Cell cell3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), - HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"), + final ExtendedCell cell3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), + Bytes.toBytes("3"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"), new Tag[] { new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring4")), new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring5")), new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring6")) }); @@ -81,7 +81,7 @@ public void testCellWithTag() throws IOException { DataInputStream dis = new DataInputStream(cis); Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); - Cell c = decoder.current(); + ExtendedCell c = decoder.current(); assertTrue(CellUtil.equals(c, cell1)); List tags = PrivateCellUtil.getTags(c); assertEquals(2, tags.size()); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java index 66a874e5c5ec..e2bdecbf7c6b 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java @@ -28,8 +28,8 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -81,7 +81,7 @@ public void testKeyValueWithTag() throws IOException { DataInputStream dis = new DataInputStream(cis); Codec.Decoder decoder = codec.getDecoder(dis); assertTrue(decoder.advance()); - Cell c = decoder.current(); + ExtendedCell c = decoder.current(); assertTrue(CellUtil.equals(c, kv1)); List tags = PrivateCellUtil.getTags(c); assertEquals(2, tags.size()); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 9d122fb0b81c..cb6e14d1b278 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -26,8 +26,8 @@ import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -72,13 +72,14 @@ public HBaseRpcController newController() { } @Override - public HBaseRpcController newController(RegionInfo regionInfo, CellScanner cellScanner) { + public HBaseRpcController newController(RegionInfo regionInfo, + ExtendedCellScanner cellScanner) { return new CountingRpcController(super.newController(regionInfo, cellScanner)); } @Override public HBaseRpcController newController(RegionInfo regionInfo, - List cellIterables) { + List cellIterables) { return new CountingRpcController(super.newController(regionInfo, cellIterables)); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java index 9d567f95a0e0..456f99629e34 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java @@ -23,6 +23,7 @@ import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -85,7 +86,12 @@ public void open(OutputStream os) throws IOException { @Override public void serialize(Cell kv) throws IOException { dos.writeInt(PrivateCellUtil.estimatedSerializedSizeOf(kv) - Bytes.SIZEOF_INT); - PrivateCellUtil.writeCell(kv, dos, true); + if (kv instanceof ExtendedCell) { + ((ExtendedCell) kv).write(dos, true); + } else { + throw new UnsupportedOperationException( + "Unsupported cell type: " + kv.getClass().getName()); + } } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java index 9380b0e71336..dd6ff00497f4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java @@ -17,10 +17,10 
@@ */ package org.apache.hadoop.hbase.mapreduce; -import java.io.IOException; import java.util.TreeSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; @@ -38,19 +38,17 @@ public class CellSortReducer protected void reduce(ImmutableBytesWritable row, Iterable kvs, Reducer.Context context) throws java.io.IOException, InterruptedException { - TreeSet map = new TreeSet<>(CellComparator.getInstance()); + TreeSet set = new TreeSet<>(CellComparator.getInstance()); for (Cell kv : kvs) { - try { - map.add(PrivateCellUtil.deepClone(kv)); - } catch (CloneNotSupportedException e) { - throw new IOException(e); - } + set.add(PrivateCellUtil.ensureExtendedCell(kv)); } - context.setStatus("Read " + map.getClass()); + context.setStatus("Read " + set.getClass()); int index = 0; - for (Cell kv : map) { + for (ExtendedCell kv : set) { context.write(row, new MapReduceExtendedCell(kv)); - if (++index % 100 == 0) context.setStatus("Wrote " + index); + if (++index % 100 == 0) { + context.setStatus("Wrote " + index); + } } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExtendedCellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExtendedCellSerialization.java index c784b2561881..9e89c7896261 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExtendedCellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExtendedCellSerialization.java @@ -94,7 +94,7 @@ public void open(OutputStream os) throws IOException { @Override public void serialize(ExtendedCell kv) throws IOException { dos.writeInt(PrivateCellUtil.estimatedSerializedSizeOf(kv) - Bytes.SIZEOF_INT); - PrivateCellUtil.writeCell(kv, dos, true); + kv.write(dos, true); dos.writeLong(kv.getSequenceId()); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 225a2dc6fe2b..51e23abc8ca6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -240,13 +240,13 @@ static RecordWriter createRecordWrit @Override public void write(ImmutableBytesWritable row, V cell) throws IOException { - Cell kv = cell; // null input == user explicitly wants to flush - if (row == null && kv == null) { + if (row == null && cell == null) { rollWriters(null); return; } + ExtendedCell kv = PrivateCellUtil.ensureExtendedCell(cell); byte[] rowKey = CellUtil.cloneRow(kv); int length = (PrivateCellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT; byte[] family = CellUtil.cloneFamily(kv); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 6605c6783ba8..8a8b846959b6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import 
org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -114,7 +116,7 @@ public int getPartition(CellWritableComparable key, Cell value, int numPartition public static class CellWritableComparable implements WritableComparable { - private Cell kv = null; + private ExtendedCell kv = null; static { // register this comparator @@ -125,7 +127,7 @@ public CellWritableComparable() { } public CellWritableComparable(Cell kv) { - this.kv = kv; + this.kv = (ExtendedCell) kv; } @Override @@ -174,7 +176,7 @@ protected void reduce(CellWritableComparable row, Iterable kvs, int index = 0; for (Cell kv : kvs) { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), - new MapReduceExtendedCell(kv)); + new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(kv))); if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } @@ -203,10 +205,12 @@ public void map(ImmutableBytesWritable row, Result value, Context context) throw filter == null || !filter.filterRowKey( PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) ) { - for (Cell kv : value.rawCells()) { + for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(value)) { kv = filterKv(filter, kv); // skip if we filtered it out - if (kv == null) continue; + if (kv == null) { + continue; + } Cell ret = convertKv(kv, cfRenameMap); context.write(new CellWritableComparable(ret), ret); } @@ -267,10 +271,12 @@ public void map(ImmutableBytesWritable row, Result value, Context context) throw filter == null || !filter.filterRowKey( PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) ) { - for (Cell kv : value.rawCells()) { + for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(value)) { kv = filterKv(filter, kv); // skip if we filtered it out - if (kv == null) continue; + if (kv == null) { + continue; + } context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap))); } } @@ -330,10 +336,12 @@ private void writeResult(ImmutableBytesWritable key, Result result, Context cont protected void processKV(ImmutableBytesWritable key, Result result, Context context, Put put, Delete delete) throws IOException, InterruptedException { - for (Cell kv : result.rawCells()) { + for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(result)) { kv = filterKv(filter, kv); // skip if we filter it out - if (kv == null) continue; + if (kv == null) { + continue; + } kv = convertKv(kv, cfRenameMap); // Deletes and Puts are gathered and written when finished @@ -476,7 +484,7 @@ private static ArrayList toQuotedByteArrays(String... 
stringArgs) { * @return null if the key should not be written, otherwise returns the original * {@link Cell} */ - public static Cell filterKv(Filter filter, Cell c) throws IOException { + public static ExtendedCell filterKv(Filter filter, ExtendedCell c) throws IOException { // apply the filter and skip this kv if the filter doesn't apply if (filter != null) { Filter.ReturnCode code = filter.filterCell(c); @@ -495,7 +503,7 @@ public static Cell filterKv(Filter filter, Cell c) throws IOException { } // helper: create a new KeyValue based on CF rename map - private static Cell convertKv(Cell kv, Map cfRenameMap) { + private static ExtendedCell convertKv(ExtendedCell kv, Map cfRenameMap) { if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 90dc5c1d555f..cd25736bd6ee 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -21,10 +21,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; import org.apache.yetus.audience.InterfaceAudience; @@ -51,45 +49,51 @@ protected void reduce(K row, Iterable vals, Context context) context.getConfiguration().getLong("putcombiner.row.threshold", 1L * (1 << 30)); int cnt = 0; long curSize = 0; - Put put = null; - Map> familyMap = null; + Put combinedPut = null; + Map> combinedFamilyMap = null; for (Put p : vals) { cnt++; - if (put == null) { - put = p; - familyMap = put.getFamilyCellMap(); + if (combinedPut == null) { + combinedPut = p; + combinedFamilyMap = PackagePrivateFieldAccessor.getExtendedFamilyCellMap(combinedPut); } else { - for (Entry> entry : p.getFamilyCellMap().entrySet()) { - List cells = familyMap.get(entry.getKey()); - List kvs = (cells != null) ? 
(List) cells : null; - for (Cell cell : entry.getValue()) { - KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); - curSize += kv.heapSize(); - if (kvs != null) { - kvs.add(kv); + for (Entry> entry : PackagePrivateFieldAccessor + .getExtendedFamilyCellMap(p).entrySet()) { + List existCells = combinedFamilyMap.get(entry.getKey()); + if (existCells == null) { + // no cells for this family yet, just put it + combinedFamilyMap.put(entry.getKey(), entry.getValue()); + // do not forget to calculate the size + for (ExtendedCell cell : entry.getValue()) { + curSize += cell.heapSize(); + } + } else { + // otherwise just add the cells to the existent list for this family + for (ExtendedCell cell : entry.getValue()) { + existCells.add(cell); + curSize += cell.heapSize(); } } - if (cells == null) { - familyMap.put(entry.getKey(), entry.getValue()); - } } - if (cnt % 10 == 0) context.setStatus("Combine " + cnt); + if (cnt % 10 == 0) { + context.setStatus("Combine " + cnt); + } if (curSize > threshold) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("Combined %d Put(s) into %d.", cnt, 1)); } - context.write(row, put); - put = null; + context.write(row, combinedPut); + combinedPut = null; curSize = 0; cnt = 0; } } } - if (put != null) { + if (combinedPut != null) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("Combined %d Put(s) into %d.", cnt, 1)); } - context.write(row, put); + context.write(row, combinedPut); } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index b4061d6be6a9..c8f32c205fb7 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -24,7 +24,6 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -62,9 +62,9 @@ public class PutSortReducer } @Override - protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, + protected void reduce(ImmutableBytesWritable row, Iterable puts, Reducer.Context context) - throws java.io.IOException, InterruptedException { + throws IOException, InterruptedException { // although reduce() is called per-row, handle pathological case long threshold = context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30)); @@ -100,8 +100,9 @@ protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells : p.getFamilyCellMap().values()) { - for (ExtendedCell cell : (List) (List) cells) { + for (List cells : PackagePrivateFieldAccessor.getExtendedFamilyCellMap(p) + .values()) { + for (ExtendedCell cell : cells) { // Creating the KV which needs to be directly written to HFiles. 
Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 37e99c096e5c..888e285f340e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -117,7 +117,8 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { byte[] outKey = multiTableSupport ? Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell)) : CellUtil.cloneRow(cell); - context.write(new ImmutableBytesWritable(outKey), new MapReduceExtendedCell(cell)); + context.write(new ImmutableBytesWritable(outKey), + new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(cell))); } } } catch (InterruptedException e) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceExtendedCell.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceExtendedCell.java index ca0d5f63a078..4233e96f2c79 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceExtendedCell.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceExtendedCell.java @@ -21,7 +21,6 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferExtendedCell; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -33,9 +32,9 @@ @InterfaceAudience.Private public class MapReduceExtendedCell extends ByteBufferExtendedCell { - private final Cell cell; + private final ExtendedCell cell; - public MapReduceExtendedCell(Cell cell) { + public MapReduceExtendedCell(ExtendedCell cell) { this.cell = cell; } @@ -226,17 +225,17 @@ public String toString() { @Override public void setSequenceId(long seqId) throws IOException { - PrivateCellUtil.setSequenceId(cell, seqId); + cell.setSequenceId(seqId); } @Override public void setTimestamp(long ts) throws IOException { - PrivateCellUtil.setTimestamp(cell, ts); + cell.setTimestamp(ts); } @Override public void setTimestamp(byte[] ts) throws IOException { - PrivateCellUtil.setTimestamp(cell, ts); + cell.setTimestamp(ts); } @Override @@ -246,7 +245,7 @@ public long heapSize() { @Override public int write(OutputStream out, boolean withTags) throws IOException { - return PrivateCellUtil.writeCell(cell, out, withTags); + return cell.write(out, withTags); } @Override @@ -256,15 +255,11 @@ public int getSerializedSize(boolean withTags) { @Override public void write(ByteBuffer buf, int offset) { - PrivateCellUtil.writeCellToBuffer(cell, buf, offset); + cell.write(buf, offset); } @Override public ExtendedCell deepClone() { - try { - return (ExtendedCell) PrivateCellUtil.deepClone(cell); - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); - } + return cell.deepClone(); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index e67ee3dbb736..0714f27e64d9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -56,6 +56,7 @@ 
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -518,7 +519,7 @@ public void test_WritingTagData() throws Exception { HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); - Cell cell = scanner.getCell(); + ExtendedCell cell = scanner.getCell(); List tagsFromCell = PrivateCellUtil.getTags(cell); assertTrue(tagsFromCell.size() > 0); for (Tag tag : tagsFromCell) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 5e3e52de6ad4..140f871e4386 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -42,9 +42,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -365,7 +366,7 @@ public void testWithDeletes() throws Throwable { s.setRaw(true); ResultScanner scanner = t.getScanner(s); Result r = scanner.next(); - Cell[] res = r.rawCells(); + ExtendedCell[] res = PackagePrivateFieldAccessor.getExtendedRawCells(r); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); assertEquals(now + 4, res[1].getTimestamp()); assertEquals(now + 3, res[2].getTimestamp()); @@ -646,13 +647,12 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(ctx).write(any(), any()); importer.setup(ctx); - Result value = mock(Result.class); KeyValue[] keys = { new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), Bytes.toBytes("value")), new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), Bytes.toBytes("value1")) }; - when(value.rawCells()).thenReturn(keys); + Result value = Result.create(keys); importer.map(new ImmutableBytesWritable(Bytes.toBytes("Key")), value, ctx); } @@ -820,7 +820,7 @@ public void testTagsAddition() throws Throwable { } private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException { - List values = new ArrayList<>(); + List values = new ArrayList<>(); for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { Scan scan = new Scan(); // Make sure to set rawScan to true so that we will get Delete Markers. 
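The test changes around here iterate cells through ExtendedCellScanner's advance()/current() contract, which is the same contract implemented by the createExtendedCellScanner overloads added to PrivateCellUtil earlier in this patch. A minimal sketch of that loop follows, under the same caveats as the earlier sketch: hypothetical cell values, and an internal IA.Private helper shown purely for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellScanner;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ExtendedCellScannerSketch {
  public static void main(String[] args) throws IOException {
    ExtendedCell[] cells = new ExtendedCell[] {
      new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q1"), 1L,
        Bytes.toBytes("v1")),
      new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q2"), 2L,
        Bytes.toBytes("v2")) };

    // Same advance()/current() protocol as the old CellScanner, but typed to ExtendedCell,
    // so callers reach write(), setTimestamp() and the tag accessors without casting.
    ExtendedCellScanner scanner = PrivateCellUtil.createExtendedCellScanner(cells);
    while (scanner.advance()) {
      ExtendedCell cell = scanner.current();
      System.out.println(Bytes.toString(CellUtil.cloneRow(cell)));
    }
  }
}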
@@ -830,13 +830,13 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO // Need to use RegionScanner instead of table#getScanner since the latter will // not return tags since it will go through rpc layer and remove tags intentionally. RegionScanner scanner = region.getScanner(scan); - scanner.next(values); + scanner.next((List) values); if (!values.isEmpty()) { break; } } boolean deleteFound = false; - for (Cell cell : values) { + for (ExtendedCell cell : values) { if (PrivateCellUtil.isDelete(cell.getType().getCode())) { deleteFound = true; List tags = PrivateCellUtil.getTags(cell); @@ -881,11 +881,11 @@ public void preBatchMutate(ObserverContext c, } Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); + for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + ExtendedCell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); - Cell updatedCell = PrivateCellUtil.createCell((ExtendedCell) cell, tags); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); updatedCells.add(updatedCell); } m.getFamilyCellMap().clear(); @@ -934,9 +934,10 @@ public void testTagsWithEmptyCodec() throws Exception { int count = 0; Result result; while ((result = scanner.next()) != null) { - List cells = result.listCells(); + List cells = + Arrays.asList(PackagePrivateFieldAccessor.getExtendedRawCells(result)); assertEquals(2, cells.size()); - Cell cell = cells.get(0); + ExtendedCell cell = cells.get(0); assertTrue(CellUtil.isDelete(cell)); List tags = PrivateCellUtil.getTags(cell); assertEquals(0, tags.size()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java index e2b45fe30c3c..a0ca5b990dd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java @@ -22,8 +22,8 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.util.Pair; @@ -94,7 +94,7 @@ private void call(HRegionLocation loc) { err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } - Pair pair = ReplicationProtobufUtil + Pair pair = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries, replica.getEncodedNameAsBytes(), null, null, null); resetCallTimeout(); controller.setCellScanner(pair.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java index f5fcc02e9186..81707fe1f16b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.concurrent.CompletableFuture; -import 
org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.yetus.audience.InterfaceAudience; @@ -95,7 +95,8 @@ private interface RpcCall { void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback done); } - private CompletableFuture call(RpcCall rpcCall, CellScanner cellScanner) { + private CompletableFuture call(RpcCall rpcCall, + ExtendedCellScanner cellScanner) { CompletableFuture future = new CompletableFuture<>(); HBaseRpcController controller = conn.rpcControllerFactory.newController(null, cellScanner); try { @@ -158,8 +159,8 @@ public CompletableFuture compactRegion(CompactRegionReque return call((stub, controller, done) -> stub.compactRegion(controller, request, done)); } - public CompletableFuture - replicateWALEntry(ReplicateWALEntryRequest request, CellScanner cellScanner, int timeout) { + public CompletableFuture replicateWALEntry( + ReplicateWALEntryRequest request, ExtendedCellScanner cellScanner, int timeout) { return call((stub, controller, done) -> { controller.setCallTimeout(timeout); stub.replicateWALEntry(controller, request, done); @@ -167,7 +168,7 @@ public CompletableFuture compactRegion(CompactRegionReque } public CompletableFuture replay(ReplicateWALEntryRequest request, - CellScanner cellScanner) { + ExtendedCellScanner cellScanner) { return call((stub, controller, done) -> stub.replay(controller, request, done), cellScanner); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index a6d6940e1e4c..8482a819c0d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -46,7 +46,7 @@ static class MessageEncoder extends BaseEncoder { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); // This copies bytes from Cell to ByteString. I don't see anyway around the copy. 
@@ -75,7 +75,7 @@ static class MessageDecoder extends BaseDecoder { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in), false); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 0c32303746c0..9900aa63cab6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -369,7 +370,7 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, HFileSc Set foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY); Set missingMobFiles = new LinkedHashSet<>(MISSING_MOB_FILES_CACHE_CAPACITY); do { - Cell cell = scanner.getCell(); + ExtendedCell cell = scanner.getCell(); if (row != null && row.length != 0) { int result = CellComparator.getInstance().compareRows(cell, row, 0, row.length); if (result > 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 0134e11d8914..ce3ea3fd9414 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import java.nio.channels.ClosedChannelException; import org.apache.hadoop.hbase.CallDroppedException; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; @@ -111,7 +111,7 @@ public void run() { } Throwable errorThrowable = null; String error = null; - Pair resultPair = null; + Pair resultPair = null; RpcServer.CurCall.set(call); final Span ipcServerSpan = new IpcServerSpanBuilder(call).build(); try (Scope ignored1 = ipcServerSpan.makeCurrent()) { @@ -156,7 +156,7 @@ public void run() { call.cleanup(); // Set the response Message param = resultPair != null ? resultPair.getFirst() : null; - CellScanner cells = resultPair != null ? resultPair.getSecond() : null; + ExtendedCellScanner cells = resultPair != null ? 
resultPair.getSecond() : null; call.setResponse(param, cells, errorThrowable, error); call.sendResponseIfReady(); // don't touch `span` here because its status and `end()` are managed in `call#setResponse()` diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java index 4f0540da80a7..62cac989d8a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.net.InetAddress; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; @@ -39,7 +39,7 @@ class NettyServerCall extends ServerCall { NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, + Message param, ExtendedCellScanner cellScanner, NettyServerRpcConnection connection, long size, InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java index f52357539dec..f63b8d2730f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; @@ -107,7 +107,7 @@ public boolean isConnectionOpen() { @Override public NettyServerCall createCall(int id, final BlockingService service, - final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, + final MethodDescriptor md, RequestHeader header, Message param, ExtendedCellScanner cellScanner, long size, final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 2d06aa7c47af..804d7b32bb42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.yetus.audience.InterfaceStability; @@ -48,7 +48,7 @@ public interface RpcCall extends RpcCallContext { Message getParam(); /** Returns The CellScanner that can carry input and result payload. */ - CellScanner getCellScanner(); + ExtendedCellScanner getCellScanner(); /** Returns The timestamp when the call is constructed. */ long getReceiveTime(); @@ -117,7 +117,8 @@ public interface RpcCall extends RpcCallContext { * @param errorThrowable The error Throwable resulting from the call. * @param error Extra error message. */ - void setResponse(Message param, CellScanner cells, Throwable errorThrowable, String error); + void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowable, + String error); /** * Send the response of this RPC call. Implementation provides the underlying facility diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index a84d132a0132..4ff1a0b54828 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.conf.ConfigurationObserver; @@ -428,7 +429,7 @@ public void setSecretManager(SecretManager secretMana * the protobuf response. */ @Override - public Pair call(RpcCall call, MonitoredRPCHandler status) + public Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException { try { MethodDescriptor md = call.getMethod(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index 2c0dd1cc2b0e..9bf5fc3817dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; @@ -45,7 +45,8 @@ public interface RpcServerInterface { InetSocketAddress getListenerAddress(); - Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; + Pair call(RpcCall call, MonitoredRPCHandler status) + throws IOException; void setErrorHandler(HBaseRPCErrorHandler handler); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 25d153c068aa..c2c6b4d063c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -30,8 +30,8 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import 
org.apache.hadoop.hbase.HBaseServerException; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -72,7 +72,7 @@ public abstract class ServerCall implements RpcCa protected final RequestHeader header; protected Message param; // the parameter passed // Optional cell data passed outside of protobufs. - protected final CellScanner cellScanner; + protected final ExtendedCellScanner cellScanner; protected final T connection; // connection to client protected final long receiveTime; // the time received when response is null // the time served when response is not null @@ -120,8 +120,8 @@ public abstract class ServerCall implements RpcCa @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... see below") ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, - long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, + Message param, ExtendedCellScanner cellScanner, T connection, long size, + InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator byteBuffAllocator, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { this.id = id; this.service = service; @@ -273,7 +273,7 @@ public String toShortString() { } @Override - public synchronized void setResponse(Message m, final CellScanner cells, Throwable t, + public synchronized void setResponse(Message m, final ExtendedCellScanner cells, Throwable t, String errorMsg) { if (this.isError) { return; @@ -544,7 +544,7 @@ public Message getParam() { } @Override - public CellScanner getCellScanner() { + public ExtendedCellScanner getCellScanner() { return cellScanner; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 31f46f30c382..c17a8da90416 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -39,8 +39,8 @@ import org.apache.commons.crypto.cipher.CryptoCipherFactory; import org.apache.commons.crypto.random.CryptoRandom; import org.apache.commons.crypto.random.CryptoRandomFactory; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.client.ConnectionRegistryEndpoint; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.codec.Codec; @@ -588,7 +588,7 @@ protected void processRequest(ByteBuff buf) throws IOException, InterruptedExcep } MethodDescriptor md = null; Message param = null; - CellScanner cellScanner = null; + ExtendedCellScanner cellScanner = null; try { if (header.hasRequestParam() && header.getRequestParam()) { md = this.service.getDescriptorForType().findMethodByName(header.getMethodName()); @@ -816,7 +816,7 @@ boolean isSimpleAuthentication() { public abstract boolean isConnectionOpen(); public abstract ServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, + RequestHeader header, Message param, ExtendedCellScanner cellScanner, long size, InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); private 
static class ByteBuffByteInput extends ByteInput { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java index 5c5e9102115c..aa6e2b7b4aca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.net.InetAddress; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +43,7 @@ class SimpleServerCall extends ServerCall { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Can't figure why this complaint is happening... see below") SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, + RequestHeader header, Message param, ExtendedCellScanner cellScanner, SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, long receiveTime, int timeout, ByteBuffAllocator bbAllocator, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 1b28c19b4306..e8619b2eb7f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -30,8 +30,8 @@ import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; @@ -456,7 +456,7 @@ public boolean isConnectionOpen() { @Override public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, + RequestHeader header, Message param, ExtendedCellScanner cellScanner, long size, InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index 2fadc83340ed..66160dd4aa64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -120,7 +120,7 @@ public static Date parseDate(String dateString) throws ParseException { * @param cell The current cell. * @return True if the cell has a mob reference tag, false if it doesn't. 
*/ - public static boolean isMobReferenceCell(Cell cell) { + public static boolean isMobReferenceCell(ExtendedCell cell) { if (cell.getTagsLength() > 0) { Optional tag = PrivateCellUtil.getTag(cell, TagType.MOB_REFERENCE_TAG_TYPE); if (tag.isPresent()) { @@ -135,7 +135,7 @@ public static boolean isMobReferenceCell(Cell cell) { * @param cell The current cell. * @return The table name tag. */ - private static Optional getTableNameTag(Cell cell) { + private static Optional getTableNameTag(ExtendedCell cell) { Optional tag = Optional.empty(); if (cell.getTagsLength() > 0) { tag = PrivateCellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE); @@ -148,7 +148,7 @@ private static Optional getTableNameTag(Cell cell) { * @param cell to extract tag from * @return table name as a string. empty if the tag is not found. */ - public static Optional getTableNameString(Cell cell) { + public static Optional getTableNameString(ExtendedCell cell) { Optional tag = getTableNameTag(cell); Optional name = Optional.empty(); if (tag.isPresent()) { @@ -162,7 +162,7 @@ public static Optional getTableNameString(Cell cell) { * @param cell to extract tag from * @return name of table as a TableName. empty if the tag is not found. */ - public static Optional getTableName(Cell cell) { + public static Optional getTableName(ExtendedCell cell) { Optional maybe = getTableNameTag(cell); Optional name = Optional.empty(); if (maybe.isPresent()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java index 6754fdef08ab..0b1736e21a46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java @@ -24,10 +24,11 @@ import java.util.concurrent.CompletableFuture; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.io.SizedCellScanner; +import org.apache.hadoop.hbase.io.SizedExtendedCellScanner; import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -56,8 +57,8 @@ public class ReplicationProtobufUtil { public static CompletableFuture replicateWALEntry( AsyncRegionServerAdmin admin, Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir, int timeout) { - Pair p = buildReplicateWALEntryRequest(entries, null, - replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir); + Pair p = buildReplicateWALEntryRequest(entries, + null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir); return admin.replicateWALEntry(p.getFirst(), p.getSecond(), timeout); } @@ -66,7 +67,7 @@ public static CompletableFuture replicateWALEntry( * @param entries the WAL entries to be replicated * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found. 
*/ - public static Pair + public static Pair buildReplicateWALEntryRequest(final Entry[] entries) { return buildReplicateWALEntryRequest(entries, null, null, null, null); } @@ -81,11 +82,11 @@ public static CompletableFuture replicateWALEntry( * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found. */ - public static Pair buildReplicateWALEntryRequest( + public static Pair buildReplicateWALEntryRequest( final Entry[] entries, byte[] encodedRegionName, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) { // Accumulate all the Cells seen in here. - List> allCells = new ArrayList<>(entries.length); + List> allCells = new ArrayList<>(entries.length); int size = 0; WALEntry.Builder entryBuilder = WALEntry.newBuilder(); ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder(); @@ -104,7 +105,8 @@ public static Pair buildReplicateWALEntry } entryBuilder.setKey(keyBuilder.build()); WALEdit edit = entry.getEdit(); - List cells = edit.getCells(); + // TODO: avoid this cast + List cells = (List) edit.getCells(); // Add up the size. It is used later serializing out the kvs. for (Cell cell : cells) { size += PrivateCellUtil.estimatedSerializedSizeOf(cell); @@ -130,21 +132,24 @@ public static Pair buildReplicateWALEntry } /** Returns cells packaged as a CellScanner */ - static CellScanner getCellScanner(final List> cells, final int size) { - return new SizedCellScanner() { - private final Iterator> entries = cells.iterator(); - private Iterator currentIterator = null; - private Cell currentCell; + static ExtendedCellScanner getCellScanner(final List> cells, + final int size) { + return new SizedExtendedCellScanner() { + private final Iterator> entries = cells.iterator(); + private Iterator currentIterator = null; + private ExtendedCell currentCell; @Override - public Cell current() { + public ExtendedCell current() { return this.currentCell; } @Override public boolean advance() { if (this.currentIterator == null) { - if (!this.entries.hasNext()) return false; + if (!this.entries.hasNext()) { + return false; + } this.currentIterator = this.entries.next().iterator(); } if (this.currentIterator.hasNext()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2792ab2754cd..68f5356f5549 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -112,6 +112,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -3512,7 +3513,7 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep // store the family map reference to allow for mutations // we know that in mutation, only ExtendedCells are allow so here we do a fake cast, to // simplify later logic - familyCellMaps[index] = (Map) mutation.getFamilyCellMap(); + familyCellMaps[index] = PackagePrivateFieldAccessor.getExtendedFamilyCellMap(mutation); } // store durability for the batch (highest durability 
of all operations in the batch) @@ -4063,7 +4064,8 @@ private Map> reckonDeltas(Mutation mutation, assert mutation instanceof Increment || mutation instanceof Append; Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Process a Store/family at a time. - for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : PackagePrivateFieldAccessor + .getExtendedFamilyCellMap(mutation).entrySet()) { final byte[] columnFamilyName = entry.getKey(); List deltas = (List) entry.getValue(); // Reckon for the Store what to apply to WAL and MemStore. @@ -4184,9 +4186,9 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, return cellPairs.stream().map(Pair::getSecond).collect(Collectors.toList()); } - private static ExtendedCell reckonDelta(final Cell delta, final Cell currentCell, - final byte[] columnFamily, final long now, Mutation mutation, Function supplier) - throws IOException { + private static ExtendedCell reckonDelta(final ExtendedCell delta, + final ExtendedCell currentCell, final byte[] columnFamily, final long now, Mutation mutation, + Function supplier) throws IOException { // Forward any tags found on the delta. List tags = TagUtil.carryForwardTags(delta); if (currentCell != null) { @@ -4204,7 +4206,6 @@ private static ExtendedCell reckonDelta(final Cell delta, final Cell currentCell } else { tags = TagUtil.carryForwardTTLTag(tags, mutation.getTTL()); PrivateCellUtil.updateLatestStamp(delta, now); - assert delta instanceof ExtendedCell; ExtendedCell deltaCell = (ExtendedCell) delta; return CollectionUtils.isEmpty(tags) ? deltaCell @@ -4522,7 +4523,8 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> cpFamilyMap = (Map) cpMutation.getFamilyCellMap(); + Map> cpFamilyMap = + PackagePrivateFieldAccessor.getExtendedFamilyCellMap(cpMutation); region.rewriteCellTags(cpFamilyMap, mutation); // will get added to the memStore later mergeFamilyMaps(familyCellMaps[i], cpFamilyMap); @@ -5094,14 +5096,16 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { if (mutation instanceof Put) { - updateCellTimestamps((Iterable) mutation.getFamilyCellMap().values(), byteTs); + updateCellTimestamps( + PackagePrivateFieldAccessor.getExtendedFamilyCellMap(mutation).values(), byteTs); } // And else 'delete' is not needed since it already does a second get, and sets the // timestamp from get (see prepareDeleteTimestamps). 
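
Note: the getCellScanner(...) helper in the ReplicationProtobufUtil hunk above exposes a list of per-WALEntry cell lists behind a single scanner; advance() drains one inner iterator before moving on to the next. A dependency-free sketch of the same flattening idea (the class below is illustrative, not HBase API):

import java.util.Iterator;
import java.util.List;

final class NestedListScanner<T> {
  private final Iterator<List<T>> outer;
  private Iterator<T> inner = null;
  private T current;

  NestedListScanner(List<List<T>> lists) {
    this.outer = lists.iterator();
  }

  T current() {
    return current;
  }

  boolean advance() {
    // Skip exhausted (or empty) inner lists until an element is found or the outer
    // iterator runs out; mirrors the advance()/current() contract used above.
    while (inner == null || !inner.hasNext()) {
      if (!outer.hasNext()) {
        return false;
      }
      inner = outer.next().iterator();
    }
    current = inner.next();
    return true;
  }
}
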
} else { for (Mutation m : rowMutations.getMutations()) { if (m instanceof Put) { - updateCellTimestamps((Iterable) m.getFamilyCellMap().values(), byteTs); + updateCellTimestamps( + PackagePrivateFieldAccessor.getExtendedFamilyCellMap(m).values(), byteTs); } } // And else 'delete' is not needed since it already does a second get, and sets the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobReferenceOnlyFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobReferenceOnlyFilter.java index 4efc29dea100..f3191c6374d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobReferenceOnlyFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobReferenceOnlyFilter.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -30,9 +31,9 @@ class MobReferenceOnlyFilter extends FilterBase { @Override public ReturnCode filterCell(final Cell cell) { - if (null != cell) { + if (null != cell && cell instanceof ExtendedCell) { // If a cell with a mob reference tag, it's included. - if (MobUtils.isMobReferenceCell(cell)) { + if (MobUtils.isMobReferenceCell((ExtendedCell) cell)) { return ReturnCode.INCLUDE; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 66c97fb9401f..5e16b08369b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -47,11 +47,12 @@ import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; @@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.OperationWithAttributes; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -581,7 +583,7 @@ private void addResults(ScanResponse.Builder builder, List results, builder.addCellsPerResult(res.size()); builder.addPartialFlagPerResult(res.mayHaveMoreCellsInRow()); } - controller.setCellScanner(CellUtil.createCellScanner(results)); + controller.setCellScanner(PrivateCellUtil.createExtendedCellScanner(results)); } else { for (Result res : results) { ClientProtos.Result pbr = ProtobufUtil.toResult(res); @@ -725,10 +727,10 @@ private Result increment(final HRegion region, final OperationQuota quota, * @param context the current RpcCallContext * @return Return the cellScanner passed */ - private List 
doNonAtomicRegionMutation(final HRegion region, + private List doNonAtomicRegionMutation(final HRegion region, final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner, - final RegionActionResult.Builder builder, List cellsToReturn, long nonceGroup, - final RegionScannersCloseCallBack closeCallBack, RpcCallContext context, + final RegionActionResult.Builder builder, List cellsToReturn, + long nonceGroup, final RegionScannersCloseCallBack closeCallBack, RpcCallContext context, ActivePolicyEnforcement spaceQuotaEnforcement) { // Gather up CONTIGUOUS Puts and Deletes in this mutations List. Idea is that rather than do // one at a time, we instead pass them in batch. Be aware that the corresponding @@ -2053,9 +2055,9 @@ public WarmupRegionResponse warmupRegion(final RpcController controller, return response; } - private CellScanner getAndReset(RpcController controller) { + private ExtendedCellScanner getAndReset(RpcController controller) { HBaseRpcController hrc = (HBaseRpcController) controller; - CellScanner cells = hrc.cellScanner(); + ExtendedCellScanner cells = hrc.cellScanner(); hrc.setCellScanner(null); return cells; } @@ -2498,8 +2500,8 @@ public GetResponse get(final RpcController controller, final GetRequest request) && VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 3) ) { pbr = ProtobufUtil.toResultNoData(r); - ((HBaseRpcController) controller) - .setCellScanner(CellUtil.createCellScanner(r.rawCells())); + ((HBaseRpcController) controller).setCellScanner(PrivateCellUtil + .createExtendedCellScanner(PackagePrivateFieldAccessor.getExtendedRawCells(r))); addSize(context, r); } else { pbr = ProtobufUtil.toResult(r); @@ -2648,10 +2650,7 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) // rpc controller is how we bring in data via the back door; it is unprotobuf'ed data. // It is also the conduit via which we pass back data. HBaseRpcController controller = (HBaseRpcController) rpcc; - CellScanner cellScanner = controller != null ? controller.cellScanner() : null; - if (controller != null) { - controller.setCellScanner(null); - } + CellScanner cellScanner = controller != null ? getAndReset(controller) : null; long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; @@ -2732,7 +2731,7 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) } // this will contain all the cells that we need to return. It's created later, if needed. - List cellsToReturn = null; + List cellsToReturn = null; RegionScannersCloseCallBack closeCallBack = null; RpcCallContext context = RpcServer.getCurrentCall().orElse(null); Map regionStats = @@ -2858,7 +2857,7 @@ public MultiResponse multi(final RpcController rpcc, final MultiRequest request) } // Load the controller with the Cells to return. 
if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) { - controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn)); + controller.setCellScanner(PrivateCellUtil.createExtendedCellScanner(cellsToReturn)); } MultiRegionLoadStats.Builder builder = MultiRegionLoadStats.newBuilder(); @@ -3427,7 +3426,10 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan int lastIdx = results.size() - 1; Result r = results.get(lastIdx); if (r.mayHaveMoreCellsInRow()) { - results.set(lastIdx, Result.create(r.rawCells(), r.getExists(), r.isStale(), false)); + results.set(lastIdx, + PackagePrivateFieldAccessor.createResult( + PackagePrivateFieldAccessor.getExtendedRawCells(r), r.getExists(), r.isStale(), + false)); } } boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java index 505cd5dedcee..81a4cc467f98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java @@ -75,7 +75,7 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException for (int i = 0; i < outResult.size(); i++) { Cell cell = outResult.get(i); assert cell instanceof ExtendedCell; - if (MobUtils.isMobReferenceCell(cell)) { + if (MobUtils.isMobReferenceCell((ExtendedCell) cell)) { MobCell mobCell = mobStore.resolve((ExtendedCell) cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); mobKVCount++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index bb874a001d2f..dff2052efe78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.io.ByteBufferWriter; @@ -139,7 +140,7 @@ public void append(Entry entry) { } try { for (Cell cell : entry.getEdit().getCells()) { - cellEncoder.write(cell); + cellEncoder.write((ExtendedCell) cell); } } catch (IOException e) { throw new AssertionError("should not happen", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java index 52317949cc83..6e6727db085d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.util.AtomicUtils; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ 
-58,7 +59,7 @@ public void append(Entry entry) throws IOException { .writeDelimitedTo(output); for (Cell cell : entry.getEdit().getCells()) { // cellEncoder must assume little about the stream, since we write PB and cells in turn. - cellEncoder.write(cell); + cellEncoder.write((ExtendedCell) cell); } length.set(output.getPos()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 754368f73f3c..9adeb958bd0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -24,7 +24,6 @@ import java.io.OutputStream; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -79,7 +78,7 @@ public EncryptedKvDecoder(InputStream in, Decryptor decryptor) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { if (this.decryptor == null) { return super.parseCell(); } @@ -174,15 +173,12 @@ public EncryptedKvEncoder(OutputStream os, Encryptor encryptor) { } @Override - public void write(Cell c) throws IOException { + public void write(ExtendedCell cell) throws IOException { if (encryptor == null) { - super.write(c); + super.write(cell); return; } - assert c instanceof ExtendedCell; - ExtendedCell cell = (ExtendedCell) c; - byte[] iv = nextIv(); encryptor.setIv(iv); encryptor.reset(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index e6a20b0d0206..87154a62066c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -233,9 +233,7 @@ public CompressedKvEncoder(OutputStream out, CompressionContext compression) { } @Override - public void write(Cell c) throws IOException { - assert c instanceof ExtendedCell; - ExtendedCell cell = (ExtendedCell) c; + public void write(ExtendedCell cell) throws IOException { // We first write the KeyValue infrastructure as VInts. 
StreamUtils.writeRawVInt32(out, KeyValueUtil.keyLength(cell)); StreamUtils.writeRawVInt32(out, cell.getValueLength()); @@ -290,7 +288,7 @@ public CompressedKvDecoder(InputStream in, CompressionContext compression) { } @Override - protected Cell parseCell() throws IOException { + protected ExtendedCell parseCell() throws IOException { int keylength = StreamUtils.readRawVarint32(in); int vlength = StreamUtils.readRawVarint32(in); int tagsLength = StreamUtils.readRawVarint32(in); @@ -396,7 +394,7 @@ public EnsureKvEncoder(OutputStream out) { } @Override - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Make sure to write tags into WAL ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, true)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index a60186c13dc7..427fe80b0c36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -23,8 +23,8 @@ import java.util.List; import java.util.concurrent.locks.Lock; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable; import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; @@ -106,8 +106,9 @@ private void replayWAL(String wal) throws IOException { try { List entries = readWALEntries(reader, wal); while (!entries.isEmpty()) { - Pair pair = ReplicationProtobufUtil - .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); + Pair pair = + ReplicationProtobufUtil + .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), pair.getSecond(), request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index d0c19d7cfcd5..f0158f299f22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -35,13 +35,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -644,7 +644,7 @@ private void checkForReservedTagPresence(User user, 
Mutation m) throws IOExcepti if (m.getAttribute(TAG_CHECK_PASSED) != null) { return; } - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Iterator tagsItr = PrivateCellUtil.tagsIterator(cellScanner.current()); while (tagsItr.hasNext()) { if (tagsItr.next().getType() == PermissionStorage.ACL_TAG_TYPE) { @@ -1732,8 +1732,9 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell // there is no need to rewrite them again. Just extract non-acl tags of newCell if we need to // add a new acl tag for the cell. Actually, oldCell is useless here. List tags = Lists.newArrayList(); - if (newCell != null) { - Iterator tagIterator = PrivateCellUtil.tagsIterator(newCell); + ExtendedCell newExtendedCell = (ExtendedCell) newCell; + if (newExtendedCell != null) { + Iterator tagIterator = PrivateCellUtil.tagsIterator(newExtendedCell); while (tagIterator.hasNext()) { Tag tag = tagIterator.next(); if (tag.getType() != PermissionStorage.ACL_TAG_TYPE) { @@ -1750,8 +1751,7 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell // We have checked the ACL tag of mutation is not null. // So that the tags could not be empty. tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, mutation.getACL())); - assert newCell instanceof ExtendedCell; - return PrivateCellUtil.createCell((ExtendedCell) newCell, tags); + return PrivateCellUtil.createCell(newExtendedCell, tags); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java index 023eccbd27d3..c27063752c58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.security.Superusers; @@ -452,7 +453,9 @@ private boolean authorizeFamily(Set permissions, TableName tabl */ public boolean authorizeCell(User user, TableName table, Cell cell, Permission.Action action) { try { - List perms = PermissionStorage.getCellPermissionsForUser(user, cell); + assert cell instanceof ExtendedCell; + List perms = + PermissionStorage.getCellPermissionsForUser(user, (ExtendedCell) cell); if (LOG.isTraceEnabled()) { LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, (perms != null ? 
perms : "")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java index 7a4444291017..b66c0ed0b099 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -866,7 +867,7 @@ public static byte[] fromNamespaceEntry(byte[] namespace) { return Arrays.copyOfRange(namespace, 1, namespace.length); } - public static List getCellPermissionsForUser(User user, Cell cell) + public static List getCellPermissionsForUser(User user, ExtendedCell cell) throws IOException { // Save an object allocation where we can if (cell.getTagsLength() == 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 0e98fd456aed..7433b73ff3de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -35,10 +35,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -300,10 +300,9 @@ public void preBatchMutate(ObserverContext c, boolean sanityFailure = false; boolean modifiedTagFound = false; Pair pair = new Pair<>(false, null); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); - assert cell instanceof ExtendedCell; - pair = checkForReservedVisibilityTagPresence((ExtendedCell) cell, pair); + for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + ExtendedCell cell = cellScanner.current(); + pair = checkForReservedVisibilityTagPresence(cell, pair); if (!pair.getFirst()) { // Don't disallow reserved tags if authorization is disabled if (authorizationEnabled) { @@ -342,10 +341,8 @@ public void preBatchMutate(ObserverContext c, } if (visibilityTags != null) { List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell ce = cellScanner.current(); - assert ce instanceof ExtendedCell; - ExtendedCell cell = (ExtendedCell) ce; + for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + ExtendedCell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); if (modifiedTagFound) { // Rewrite the tags by removing the modified tags. 
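
Note: several hunks above (the TestImportExport observer, AccessController, VisibilityController) follow one pattern: iterate a Mutation's cells through its ExtendedCellScanner, carry the existing tags forward, and rebuild each cell with an extra tag. A condensed sketch of that loop, restricted to calls that appear in the diffs; the tag type constant and helper class are made up for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellScanner;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.util.Bytes;

final class TagRewriteSketch {
  // Hypothetical tag type, for illustration only.
  private static final byte DEMO_TAG_TYPE = (byte) 210;

  static List<Cell> addTagToAllCells(Mutation m) throws IOException {
    List<Cell> updatedCells = new ArrayList<>();
    Tag extraTag = new ArrayBackedTag(DEMO_TAG_TYPE, Bytes.toBytes("demo"));
    // After this change m.cellScanner() yields ExtendedCells, so no casting is needed.
    for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
      ExtendedCell cell = cellScanner.current();
      List<Tag> tags = PrivateCellUtil.getTags(cell); // existing tags, carried forward
      tags.add(extraTag);
      updatedCells.add(PrivateCellUtil.createCell(cell, tags));
    }
    return updatedCells;
  }
}
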
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCustomPriorityRpcControllerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCustomPriorityRpcControllerFactory.java index fde491983cea..435037ab17d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCustomPriorityRpcControllerFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCustomPriorityRpcControllerFactory.java @@ -281,12 +281,12 @@ public HBaseRpcController newController() { } @Override - public HBaseRpcController newController(CellScanner cellScanner) { + public HBaseRpcController newController(ExtendedCellScanner cellScanner) { return new PriorityController(EXPECTED_PRIORITY.get(), super.newController(cellScanner)); } @Override - public HBaseRpcController newController(List cellIterables) { + public HBaseRpcController newController(List cellIterables) { return new PriorityController(EXPECTED_PRIORITY.get(), super.newController(cellIterables)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java index 0baf0443800e..21831a67b0ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java @@ -23,7 +23,7 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -63,8 +63,8 @@ public void test() throws IOException { assertSame(ScanResultCache.EMPTY_RESULT_ARRAY, resultCache.addAndGet(ScanResultCache.EMPTY_RESULT_ARRAY, true)); - Cell[] cells1 = createCells(CF, 1, 10); - Cell[] cells2 = createCells(CF, 2, 10); + ExtendedCell[] cells1 = createCells(CF, 1, 10); + ExtendedCell[] cells2 = createCells(CF, 2, 10); Result[] results1 = resultCache.addAndGet( new Result[] { Result.create(Arrays.copyOf(cells1, 5), null, false, true) }, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java index b8c57fb5f6b5..dd0074ae6913 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -56,12 +56,12 @@ public void tearDown() { resultCache = null; } - static Cell createCell(byte[] cf, int key, int cq) { + static ExtendedCell createCell(byte[] cf, int key, int cq) { return new KeyValue(Bytes.toBytes(key), cf, Bytes.toBytes("cq" + cq), Bytes.toBytes(key)); } - static Cell[] createCells(byte[] cf, int key, int numCqs) { - Cell[] cells = new Cell[numCqs]; + static ExtendedCell[] createCells(byte[] cf, int key, int numCqs) { + ExtendedCell[] cells = new ExtendedCell[numCqs]; for (int i = 0; i < numCqs; i++) { cells[i] = createCell(cf, key, i); } 
@@ -83,9 +83,9 @@ public void test() throws IOException { assertSame(ScanResultCache.EMPTY_RESULT_ARRAY, resultCache.addAndGet(ScanResultCache.EMPTY_RESULT_ARRAY, true)); - Cell[] cells1 = createCells(CF, 1, 10); - Cell[] cells2 = createCells(CF, 2, 10); - Cell[] cells3 = createCells(CF, 3, 10); + ExtendedCell[] cells1 = createCells(CF, 1, 10); + ExtendedCell[] cells2 = createCells(CF, 2, 10); + ExtendedCell[] cells3 = createCells(CF, 3, 10); assertEquals(0, resultCache.addAndGet( new Result[] { Result.create(Arrays.copyOf(cells1, 3), null, false, true) }, false).length); Result[] results = resultCache diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 8e5895c6914e..2e4a13fc8ce4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -232,7 +233,7 @@ public void testKeepDeletedCells() throws Exception { s.setTimeRange(0, ts + 3); s.readAllVersions(); ResultScanner scanner = h.getScanner(s); - Cell[] kvs = scanner.next().rawCells(); + ExtendedCell[] kvs = scanner.next().rawExtendedCells(); assertArrayEquals(T2, CellUtil.cloneValue(kvs[0])); assertArrayEquals(T1, CellUtil.cloneValue(kvs[1])); scanner.close(); @@ -241,7 +242,7 @@ public void testKeepDeletedCells() throws Exception { s.setRaw(true); s.readAllVersions(); scanner = h.getScanner(s); - kvs = scanner.next().rawCells(); + kvs = scanner.next().rawExtendedCells(); assertTrue(PrivateCellUtil.isDeleteFamily(kvs[0])); assertArrayEquals(T3, CellUtil.cloneValue(kvs[1])); assertTrue(CellUtil.isDelete(kvs[2])); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index f2b0f75724d8..be70c4c5cd64 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -566,8 +567,8 @@ public void testIncrementWithTtlTags() throws Exception { int count = 0; Result result; while ((result = scanner.next()) != null) { - Cell[] cells = result.rawCells(); - for (Cell cell : cells) { + ExtendedCell[] cells = result.rawExtendedCells(); + for (ExtendedCell cell : cells) { List tags = PrivateCellUtil.getTags(cell); // Make sure there is only 1 tag. 
assertEquals(1, tags.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java index 64f4c89a34fa..37422c1f1a0f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java @@ -32,9 +32,11 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -199,14 +201,15 @@ public void testAtomicOperations() throws Exception { ProtobufUtil.toMutationNoData(ClientProtos.MutationProto.MutationType.PUT, put)))) .build(); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (Mutation m : rm.getMutations()) { cells.addAll(m.getCellList(FAMILY)); } cells.addAll(put.getCellList(FAMILY)); assertEquals(3, cells.size()); HBaseRpcController controller = Mockito.mock(HBaseRpcController.class); - Mockito.when(controller.cellScanner()).thenReturn(CellUtil.createCellScanner(cells)); + Mockito.when(controller.cellScanner()) + .thenReturn(PrivateCellUtil.createExtendedCellScanner(cells)); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(TEST_UTIL .getMiniHBaseCluster().getServerHoldingRegion(TABLE_NAME, r.getRegionInfo().getRegionName())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index c5842629e600..1de686b253b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -23,7 +23,9 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -45,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -83,15 +86,16 @@ public void testResultAsCellScanner() throws IOException { Cell[] cells = genKVs(row, family, value, 1, 10); Arrays.sort(cells, CellComparator.getInstance()); Result r = Result.create(cells); - assertSame(r, cells); + assertCellsSame(r, cells); // Assert I run over same result multiple times. 
- assertSame(r.cellScanner(), cells); - assertSame(r.cellScanner(), cells); + assertCellsSame(r.cellScanner(), cells); + assertCellsSame(r.cellScanner(), cells); // Assert we are not creating new object when doing cellscanner assertTrue(r == r.cellScanner()); } - private void assertSame(final CellScanner cellScanner, final Cell[] cells) throws IOException { + private void assertCellsSame(final CellScanner cellScanner, final Cell[] cells) + throws IOException { int count = 0; while (cellScanner.advance()) { assertTrue(cells[count].equals(cellScanner.current())); @@ -458,7 +462,6 @@ public void testEmptyResultIsReadonly() { * Microbenchmark that compares {@link Result#getValue} and {@link Result#loadValue} performance. */ public void doReadBenchmark() throws Exception { - final int n = 5; final int m = 100000000; @@ -516,6 +519,20 @@ public void doReadBenchmark() throws Exception { System.out.println("getValue(): " + (stop - start)); } + @Test + public void testCreateResultWithCellArray() { + Cell[] cells = genKVs(row, family, value, EnvironmentEdgeManager.currentTime(), 5); + Result r = Result.create(cells); + // the cells is actually a KeyValue[], which can be cast to ExtendedCell[] directly, so we + // should get the same one without copying + assertSame(cells, r.rawCells()); + + Cell[] emptyCells = new Cell[0]; + Result emptyResult = Result.create(emptyCells); + // emptyCells is a Cell[] instead of ExtendedCell[], so we need to copy it to a new array + assertNotSame(emptyCells, emptyResult.rawCells()); + } + /** * Calls non-functional test methods. */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java index 272bf1c46096..2481cb200d80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java @@ -24,7 +24,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.CellOutputStream; import org.apache.hadoop.hbase.util.Bytes; @@ -42,8 +43,8 @@ public class CodecPerformance { @Deprecated public static final Logger LOG = LoggerFactory.getLogger(CodecPerformance.class); - static Cell[] getCells(final int howMany) { - Cell[] cells = new Cell[howMany]; + static ExtendedCell[] getCells(final int howMany) { + ExtendedCell[] cells = new ExtendedCell[howMany]; for (int i = 0; i < howMany; i++) { byte[] index = Bytes.toBytes(i); KeyValue kv = new KeyValue(index, Bytes.toBytes("f"), index, index); @@ -62,7 +63,7 @@ static int getRoughSize(final Cell[] cells) { } static byte[] runEncoderTest(final int index, final int initialBufferSize, - final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell[] cells) + final ByteArrayOutputStream baos, final CellOutputStream encoder, final ExtendedCell[] cells) throws IOException { long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < cells.length; i++) { @@ -76,9 +77,9 @@ static byte[] runEncoderTest(final int index, final int initialBufferSize, return baos.toByteArray(); } - static Cell[] runDecoderTest(final int index, final int count, final CellScanner decoder) - throws IOException { - Cell[] cells = new Cell[count]; + 
static ExtendedCell[] runDecoderTest(final int index, final int count, + final ExtendedCellScanner decoder) throws IOException { + ExtendedCell[] cells = new ExtendedCell[count]; long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; decoder.advance(); i++) { cells[i] = decoder.current(); @@ -94,10 +95,10 @@ static void verifyCells(final Cell[] input, final Cell[] output) { assertArrayEquals(input, output); } - static void doCodec(final Codec codec, final Cell[] cells, final int cycles, final int count, - final int initialBufferSize) throws IOException { + static void doCodec(final Codec codec, final ExtendedCell[] cells, final int cycles, + final int count, final int initialBufferSize) throws IOException { byte[] bytes = null; - Cell[] cellsDecoded = null; + ExtendedCell[] cellsDecoded = null; for (int i = 0; i < cycles; i++) { ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize); Codec.Encoder encoder = codec.getEncoder(baos); @@ -117,7 +118,7 @@ public static void main(String[] args) throws IOException { // How many times to do an operation; repeat gives hotspot chance to warm up. final int cycles = 30; - Cell[] cells = getCells(count); + ExtendedCell[] cells = getCells(count); int size = getRoughSize(cells); int initialBufferSize = 2 * size; // Multiply by 2 to ensure we don't have to grow buffer diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java index d580c3d7ff18..657f770dd32a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java @@ -26,10 +26,10 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -116,14 +116,14 @@ public void testThree() throws IOException { DataInputStream dis = new DataInputStream(cis); Codec.Decoder decoder = cmc.getDecoder(dis); assertTrue(decoder.advance()); - Cell c = decoder.current(); - assertTrue(CellUtil.equals(c, kv1)); + ExtendedCell c = decoder.current(); + assertTrue(PrivateCellUtil.equals(c, kv1)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellUtil.equals(c, kv2)); + assertTrue(PrivateCellUtil.equals(c, kv2)); assertTrue(decoder.advance()); c = decoder.current(); - assertTrue(CellUtil.equals(c, kv3)); + assertTrue(PrivateCellUtil.equals(c, kv3)); assertFalse(decoder.advance()); dis.close(); assertEquals(offset, cis.getCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java deleted file mode 100644 index b00296f213f4..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.coprocessor; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hbase.testclassification.CoprocessorTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ CoprocessorTests.class, MediumTests.class }) -public class TestPassCustomCellViaRegionObserver { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPassCustomCellViaRegionObserver.class); - - @Rule - public TestName testName = new TestName(); - - private TableName tableName; - private Table table = null; - - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - private static final byte[] ROW = Bytes.toBytes("ROW"); - private static final byte[] FAMILY = Bytes.toBytes("FAMILY"); - private static final byte[] QUALIFIER = Bytes.toBytes("QUALIFIER"); - private static final byte[] VALUE = Bytes.toBytes(10L); - private static final byte[] APPEND_VALUE = Bytes.toBytes("MB"); - - private static final byte[] QUALIFIER_FROM_CP = Bytes.toBytes("QUALIFIER_FROM_CP"); - - @BeforeClass - public static void setupBeforeClass() throws Exception { - // small retry number can speed 
up the failed tests. - UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); - UTIL.startMiniCluster(); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - UTIL.shutdownMiniCluster(); - } - - @Before - public void clearTable() throws IOException { - RegionObserverImpl.COUNT.set(0); - tableName = TableName.valueOf(testName.getMethodName()); - if (table != null) { - table.close(); - } - try (Admin admin = UTIL.getAdmin()) { - for (TableName name : admin.listTableNames()) { - try { - admin.disableTable(name); - } catch (IOException e) { - } - admin.deleteTable(name); - } - table = UTIL.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(RegionObserverImpl.class.getName()).build(), null); - } - } - - @Test - public void testMutation() throws Exception { - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - byte[] value = VALUE; - assertResult(table.get(new Get(ROW)), value, value); - assertObserverHasExecuted(); - - Increment inc = new Increment(ROW); - inc.addColumn(FAMILY, QUALIFIER, 10L); - table.increment(inc); - // QUALIFIER -> 10 (put) + 10 (increment) - // QUALIFIER_FROM_CP -> 10 (from cp's put) + 10 (from cp's increment) - value = Bytes.toBytes(20L); - assertResult(table.get(new Get(ROW)), value, value); - assertObserverHasExecuted(); - - Append append = new Append(ROW); - append.addColumn(FAMILY, QUALIFIER, APPEND_VALUE); - table.append(append); - // 10L + "MB" - value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length]).put(value) - .put(APPEND_VALUE).array(); - assertResult(table.get(new Get(ROW)), value, value); - assertObserverHasExecuted(); - - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, QUALIFIER); - table.delete(delete); - assertTrue(Arrays.asList(table.get(new Get(ROW)).rawCells()).toString(), - table.get(new Get(ROW)).isEmpty()); - assertObserverHasExecuted(); - - assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put)); - assertObserverHasExecuted(); - - assertTrue( - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete)); - assertObserverHasExecuted(); - - assertTrue(table.get(new Get(ROW)).isEmpty()); - } - - @Test - public void testMultiPut() throws Exception { - List puts = - IntStream.range(0, 10).mapToObj(i -> new Put(ROW).addColumn(FAMILY, Bytes.toBytes(i), VALUE)) - .collect(Collectors.toList()); - table.put(puts); - assertResult(table.get(new Get(ROW)), VALUE); - assertObserverHasExecuted(); - - List deletes = - IntStream.range(0, 10).mapToObj(i -> new Delete(ROW).addColumn(FAMILY, Bytes.toBytes(i))) - .collect(Collectors.toList()); - table.delete(deletes); - assertTrue(table.get(new Get(ROW)).isEmpty()); - assertObserverHasExecuted(); - } - - private static void assertObserverHasExecuted() { - assertTrue(RegionObserverImpl.COUNT.getAndSet(0) > 0); - } - - private static void assertResult(Result result, byte[] expectedValue) { - assertFalse(result.isEmpty()); - for (Cell c : result.rawCells()) { - assertTrue(c.toString(), Bytes.equals(ROW, CellUtil.cloneRow(c))); - assertTrue(c.toString(), Bytes.equals(FAMILY, CellUtil.cloneFamily(c))); - assertTrue(c.toString(), Bytes.equals(expectedValue, CellUtil.cloneValue(c))); - } - } - - private static void assertResult(Result result, byte[] expectedValue, byte[] expectedFromCp) { - assertFalse(result.isEmpty()); - for (Cell c : result.rawCells()) { - 
assertTrue(c.toString(), Bytes.equals(ROW, CellUtil.cloneRow(c))); - assertTrue(c.toString(), Bytes.equals(FAMILY, CellUtil.cloneFamily(c))); - if (Bytes.equals(QUALIFIER, CellUtil.cloneQualifier(c))) { - assertTrue(c.toString(), Bytes.equals(expectedValue, CellUtil.cloneValue(c))); - } else if (Bytes.equals(QUALIFIER_FROM_CP, CellUtil.cloneQualifier(c))) { - assertTrue(c.toString(), Bytes.equals(expectedFromCp, CellUtil.cloneValue(c))); - } else { - fail("No valid qualifier"); - } - } - } - - private static Cell createCustomCell(byte[] row, byte[] family, byte[] qualifier, Cell.Type type, - byte[] value) { - return new Cell() { - - @Override - public long heapSize() { - return 0; - } - - private byte[] getArray(byte[] array) { - return array == null ? HConstants.EMPTY_BYTE_ARRAY : array; - } - - private int length(byte[] array) { - return array == null ? 0 : array.length; - } - - @Override - public byte[] getRowArray() { - return getArray(row); - } - - @Override - public int getRowOffset() { - return 0; - } - - @Override - public short getRowLength() { - return (short) length(row); - } - - @Override - public byte[] getFamilyArray() { - return getArray(family); - } - - @Override - public int getFamilyOffset() { - return 0; - } - - @Override - public byte getFamilyLength() { - return (byte) length(family); - } - - @Override - public byte[] getQualifierArray() { - return getArray(qualifier); - } - - @Override - public int getQualifierOffset() { - return 0; - } - - @Override - public int getQualifierLength() { - return length(qualifier); - } - - @Override - public long getTimestamp() { - return HConstants.LATEST_TIMESTAMP; - } - - @Override - public byte getTypeByte() { - return type.getCode(); - } - - @Override - public long getSequenceId() { - return 0; - } - - @Override - public byte[] getValueArray() { - return getArray(value); - } - - @Override - public int getValueOffset() { - return 0; - } - - @Override - public int getValueLength() { - return length(value); - } - - @Override - public int getSerializedSize() { - return KeyValueUtil.getSerializedSize(this, true); - } - - @Override - public byte[] getTagsArray() { - return getArray(null); - } - - @Override - public int getTagsOffset() { - return 0; - } - - @Override - public int getTagsLength() { - return length(null); - } - - @Override - public Type getType() { - return type; - } - }; - } - - private static Cell createCustomCell(Put put) { - return createCustomCell(put.getRow(), FAMILY, QUALIFIER_FROM_CP, Cell.Type.Put, VALUE); - } - - private static Cell createCustomCell(Append append) { - return createCustomCell(append.getRow(), FAMILY, QUALIFIER_FROM_CP, Cell.Type.Put, - APPEND_VALUE); - } - - private static Cell createCustomCell(Increment inc) { - return createCustomCell(inc.getRow(), FAMILY, QUALIFIER_FROM_CP, Cell.Type.Put, VALUE); - } - - private static Cell createCustomCell(Delete delete) { - return createCustomCell(delete.getRow(), FAMILY, QUALIFIER_FROM_CP, Cell.Type.DeleteColumn, - null); - } - - public static class RegionObserverImpl implements RegionCoprocessor, RegionObserver { - static final AtomicInteger COUNT = new AtomicInteger(0); - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException { - put.add(createCustomCell(put)); - COUNT.incrementAndGet(); - } - - @Override - public void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws 
IOException { - delete.add(createCustomCell(delete)); - COUNT.incrementAndGet(); - } - - @Override - public boolean preCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { - put.add(createCustomCell(put)); - COUNT.incrementAndGet(); - return result; - } - - @Override - public boolean preCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { - delete.add(createCustomCell(delete)); - COUNT.incrementAndGet(); - return result; - } - - @Override - public Result preAppend(ObserverContext c, Append append) - throws IOException { - append.add(createCustomCell(append)); - COUNT.incrementAndGet(); - return null; - } - - @Override - public Result preIncrement(ObserverContext c, Increment increment) - throws IOException { - increment.add(createCustomCell(increment)); - COUNT.incrementAndGet(); - return null; - } - - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java index 406618295ca0..64f47e1d1bac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -247,7 +248,7 @@ public void testAppendTTLWithACLTag() throws Exception { } } - private static boolean checkAclTag(byte[] acl, Cell cell) { + private static boolean checkAclTag(byte[] acl, ExtendedCell cell) { Iterator iter = PrivateCellUtil.tagsIterator(cell); while (iter.hasNext()) { Tag tag = iter.next(); @@ -342,7 +343,10 @@ public List> postIncrementBeforeWAL( List> cellPairs) throws IOException { List> result = super.postIncrementBeforeWAL(ctx, mutation, cellPairs); for (Pair pair : result) { - if (mutation.getACL() != null && !checkAclTag(mutation.getACL(), pair.getSecond())) { + if ( + mutation.getACL() != null + && !checkAclTag(mutation.getACL(), (ExtendedCell) pair.getSecond()) + ) { throw new DoNotRetryIOException("Unmatched ACL tag."); } } @@ -355,7 +359,10 @@ public List> postAppendBeforeWAL( List> cellPairs) throws IOException { List> result = super.postAppendBeforeWAL(ctx, mutation, cellPairs); for (Pair pair : result) { - if (mutation.getACL() != null && !checkAclTag(mutation.getACL(), pair.getSecond())) { + if ( + mutation.getACL() != null + && !checkAclTag(mutation.getACL(), (ExtendedCell) pair.getSecond()) + ) { throw new DoNotRetryIOException("Unmatched ACL tag."); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index f64381a8a22e..26144d4adea4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -31,6 +31,7 @@ import 
org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -554,12 +555,14 @@ Lists. newArrayList( // Value for fam:qual1 should be stripped: assertEquals(Filter.ReturnCode.INCLUDE, flist.filterCell(kvQual1)); - final KeyValue transformedQual1 = KeyValueUtil.ensureKeyValue(flist.transformCell(kvQual1)); + final KeyValue transformedQual1 = + KeyValueUtil.ensureKeyValue((ExtendedCell) flist.transformCell(kvQual1)); assertEquals(0, transformedQual1.getValueLength()); // Value for fam:qual2 should not be stripped: assertEquals(Filter.ReturnCode.INCLUDE, flist.filterCell(kvQual2)); - final KeyValue transformedQual2 = KeyValueUtil.ensureKeyValue(flist.transformCell(kvQual2)); + final KeyValue transformedQual2 = + KeyValueUtil.ensureKeyValue((ExtendedCell) flist.transformCell(kvQual2)); assertEquals("value", Bytes.toString(transformedQual2.getValueArray(), transformedQual2.getValueOffset(), transformedQual2.getValueLength())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java index 024fb04c6873..afbaceebeea1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -88,7 +88,7 @@ public void doTest(Configuration conf, Path path, Compression.Algorithm compress scanner = reader.getScanner(conf, false, false); assertTrue("Initial seekTo failed", scanner.seekTo()); do { - Cell kv = scanner.getCell(); + ExtendedCell kv = scanner.getCell(); assertTrue("Read back an unexpected or invalid KV", testKvs.contains(KeyValueUtil.ensureKeyValue(kv))); i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java index 68ce5e359f56..24d69d88c101 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java @@ -24,11 +24,11 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.Codec.Decoder; import org.apache.hadoop.hbase.codec.Codec.Encoder; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; @@ -127,11 +127,11 @@ public void testKVCodecWithTagsForDecodedCellsWithNoTags() throws Exception { ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray()); Decoder decoder = codec.getDecoder(is); 
assertTrue(decoder.advance()); - assertTrue(CellUtil.equals(c1, decoder.current())); + assertTrue(PrivateCellUtil.equals(c1, decoder.current())); assertTrue(decoder.advance()); - assertTrue(CellUtil.equals(c2, decoder.current())); + assertTrue(PrivateCellUtil.equals(c2, decoder.current())); assertTrue(decoder.advance()); - assertTrue(CellUtil.equals(c3, decoder.current())); + assertTrue(PrivateCellUtil.equals(c3, decoder.current())); assertFalse(decoder.advance()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index 6ae5a74ebe8d..a37c5a5a5f90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -327,7 +327,7 @@ public void testNextOnSample() throws IOException { int i = 0; do { KeyValue expectedKeyValue = sampleKv.get(i); - Cell cell = seeker.getCell(); + ExtendedCell cell = seeker.getCell(); if ( PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expectedKeyValue, cell) != 0 @@ -360,7 +360,7 @@ public void testFirstKeyInBlockOnSample() throws IOException { DataBlockEncoder encoder = encoding.getEncoder(); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); - Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer)); + ExtendedCell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer)); KeyValue firstKv = sampleKv.get(0); if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) { int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true); @@ -394,20 +394,20 @@ public void testRowIndexWithTagsButNoTagsInCell() throws IOException { private void checkSeekingConsistency(List encodedSeekers, boolean seekBefore, ExtendedCell keyValue) { - Cell expectedKeyValue = null; + ExtendedCell expectedKeyValue = null; ByteBuffer expectedKey = null; ByteBuffer expectedValue = null; for (DataBlockEncoder.EncodedSeeker seeker : encodedSeekers) { seeker.seekToKeyInBlock(keyValue, seekBefore); seeker.rewind(); - Cell actualKeyValue = seeker.getCell(); + ExtendedCell actualKeyValue = seeker.getCell(); ByteBuffer actualKey = null; actualKey = ByteBuffer.wrap(((KeyValue) seeker.getKey()).getKey()); ByteBuffer actualValue = seeker.getValueShallowCopy(); if (expectedKeyValue != null) { - assertTrue(CellUtil.equals(expectedKeyValue, actualKeyValue)); + assertTrue(PrivateCellUtil.equals(expectedKeyValue, actualKeyValue)); } else { expectedKeyValue = actualKeyValue; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index f3711428ce53..663c0d540499 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import 
org.apache.hadoop.hbase.HBaseTestingUtil; @@ -244,7 +244,7 @@ public void testHFileEncryption() throws Exception { scanner = reader.getScanner(conf, false, false); assertTrue("Initial seekTo failed", scanner.seekTo()); do { - Cell kv = scanner.getCell(); + ExtendedCell kv = scanner.getCell(); assertTrue("Read back an unexpected or invalid KV", testKvs.contains(KeyValueUtil.ensureKeyValue(kv))); i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index 837ee17110ec..ec1ebfd9d633 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -26,7 +26,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -116,9 +116,9 @@ public void testBasicScanWithLRUCache() throws IOException { String method = this.getName(); this.region = initHRegion(tableName, method, conf, test_util, fam1); try { - List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, false); + List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, false); - List actual = performScan(row1, fam1); + List actual = performScan(row1, fam1); // Verify result for (int i = 0; i < expected.size(); i++) { assertFalse(actual.get(i) instanceof ByteBufferKeyValue); @@ -154,9 +154,9 @@ public void testBasicScanWithOffheapBucketCache() throws IOException { String method = this.getName(); this.region = initHRegion(tableName, method, conf, test_util, fam1); try { - List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, false); + List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, false); - List actual = performScan(row1, fam1); + List actual = performScan(row1, fam1); // Verify result for (int i = 0; i < expected.size(); i++) { assertFalse(actual.get(i) instanceof ByteBufferKeyValue); @@ -195,9 +195,9 @@ public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException { String method = this.getName(); this.region = initHRegion(tableName, method, conf, test_util, fam1); try { - List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, true); + List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, true); - List actual = performScan(row1, fam1); + List actual = performScan(row1, fam1); // Verify result for (int i = 0; i < expected.size(); i++) { assertFalse(actual.get(i) instanceof ByteBufferKeyValue); @@ -211,7 +211,7 @@ public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException { actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); // Verify result for (int i = 0; i < expected.size(); i++) { @@ -229,7 +229,7 @@ public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException { } } - private List insertData(byte[] row1, byte[] qf1, byte[] qf2, byte[] fam1, long ts1, + private List insertData(byte[] row1, byte[] qf1, byte[] qf2, byte[] fam1, long ts1, long ts2, long ts3, boolean withVal) throws IOException { // Putting data in Region 
Put put = null; @@ -276,7 +276,7 @@ private List insertData(byte[] row1, byte[] qf1, byte[] qf2, byte[] fam1, } // Expected - List expected = new ArrayList<>(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -284,12 +284,12 @@ private List insertData(byte[] row1, byte[] qf1, byte[] qf2, byte[] fam1, return expected; } - private List performScan(byte[] row1, byte[] fam1) throws IOException { + private List performScan(byte[] row1, byte[] fam1) throws IOException { Scan scan = new Scan().withStartRow(row1).addFamily(fam1).readVersions(MAX_VERSIONS); - List actual = new ArrayList<>(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); return actual; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index 04c38127d51f..f07238e134d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -25,13 +25,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; @@ -178,8 +178,8 @@ private void checkNoSeekBefore(ExtendedCell[] cells, HFileScanner scanner, int i } /** Check a key/value pair after it was read by the reader */ - private void checkCell(Cell expected, Cell actual) { + private void checkCell(ExtendedCell expected, ExtendedCell actual) { assertTrue(String.format("Expected key %s, but was %s", CellUtil.getCellKeyAsString(expected), - CellUtil.getCellKeyAsString(actual)), CellUtil.equals(expected, actual)); + CellUtil.getCellKeyAsString(actual)), PrivateCellUtil.equals(expected, actual)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index bf26b019e2a1..4c55c85f9035 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -171,7 +172,7 @@ protected void testSeekBeforeInternals(TagUsage tagUsage) throws IOException { assertEquals("g", toRowStr(scanner.getCell())); assertTrue(scanner.seekBefore(toKV("j", tagUsage))); assertEquals("i", toRowStr(scanner.getCell())); - Cell cell = scanner.getCell(); + ExtendedCell cell = 
scanner.getCell(); if (tagUsage != TagUsage.NO_TAG && cell.getTagsLength() > 0) { Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index 0f0c22baf9fc..248e8f9d8cef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -62,14 +62,14 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseServerBase; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MatcherPredicate; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; @@ -176,7 +176,7 @@ public void testNoCodec() throws IOException, ServiceException { public void testCompressCellBlock() throws IOException, ServiceException { Configuration clientConf = new Configuration(CONF); clientConf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName()); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); int count = 3; for (int i = 0; i < count; i++) { cells.add(CELL); @@ -188,7 +188,8 @@ public void testCompressCellBlock() throws IOException, ServiceException { try (AbstractRpcClient client = createRpcClient(clientConf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); - HBaseRpcController pcrc = new HBaseRpcControllerImpl(CellUtil.createCellScanner(cells)); + HBaseRpcController pcrc = + new HBaseRpcControllerImpl(PrivateCellUtil.createExtendedCellScanner(cells)); String message = "hello"; assertEquals(message, stub.echo(pcrc, EchoRequestProto.newBuilder().setMessage(message).build()).getMessage()); @@ -270,9 +271,8 @@ public void testRpcMaxRequestSize() throws IOException, ServiceException { } // set total RPC size bigger than 100 bytes EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message.toString()).build(); - stub.echo( - new HBaseRpcControllerImpl(CellUtil.createCellScanner(ImmutableList. of(CELL))), - param); + stub.echo(new HBaseRpcControllerImpl( + PrivateCellUtil.createExtendedCellScanner(ImmutableList. 
of(CELL))), param); fail("RPC should have failed because it exceeds max request size"); } catch (ServiceException e) { LOG.info("Caught expected exception: " + e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java index 001f6dbd22c7..64fc47ca1940 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyChannelWritability.java @@ -32,13 +32,13 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; @@ -112,7 +112,7 @@ public void testNettyWritableFatalThreshold() throws Exception { private void sendAndReceive(Configuration conf, NettyRpcServer rpcServer, int requestCount) throws Exception { - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); int count = 3; for (int i = 0; i < count; i++) { cells.add(CELL); @@ -136,9 +136,10 @@ private void sendAndReceive(Configuration conf, NettyRpcServer rpcServer, int re } } - private void sendMessage(List cells, + private void sendMessage(List cells, TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub) throws Exception { - HBaseRpcController pcrc = new HBaseRpcControllerImpl(CellUtil.createCellScanner(cells)); + HBaseRpcController pcrc = + new HBaseRpcControllerImpl(PrivateCellUtil.createExtendedCellScanner(cells)); String message = "hello"; assertEquals(message, stub.echo(pcrc, TestProtos.EchoRequestProto.newBuilder().setMessage(message).build()) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java index b2ac0a3deb9b..edb9a64a7a8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java @@ -21,10 +21,10 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -82,8 +82,8 @@ public EchoResponseProto echo(RpcController controller, EchoRequestProto request HBaseRpcController pcrc = (HBaseRpcController) controller; // If cells, scan them to check we are able to iterate what we were given and since this is an // echo, just put them back on 
the controller creating a new block. Tests our block building. - CellScanner cellScanner = pcrc.cellScanner(); - List list = null; + ExtendedCellScanner cellScanner = pcrc.cellScanner(); + List list = null; if (cellScanner != null) { list = new ArrayList<>(); try { @@ -94,7 +94,7 @@ public EchoResponseProto echo(RpcController controller, EchoRequestProto request throw new ServiceException(e); } } - cellScanner = CellUtil.createCellScanner(list); + cellScanner = PrivateCellUtil.createExtendedCellScanner(list); pcrc.setCellScanner(cellScanner); } return EchoResponseProto.newBuilder().setMessage(request.getMessage()).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a7164a6fab64..a25bae6ec7bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -33,10 +33,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; @@ -388,9 +388,10 @@ public ScanResponse scan(RpcController controller, ScanRequest request) throws S Result result = next(scannerId); if (result != null) { builder.addCellsPerResult(result.size()); - List results = new ArrayList<>(1); + List results = new ArrayList<>(1); results.add(result); - ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(results)); + ((HBaseRpcController) controller) + .setCellScanner(PrivateCellUtil.createExtendedCellScanner(results)); builder.setMoreResults(true); } else { builder.setMoreResults(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java index c2f2b3fd4260..835d8b83d69e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -373,11 +374,11 @@ private int countMobRows() throws IOException { InternalScanner scanner = region.getScanner(scan); int scannedCount = 0; - List results = new ArrayList<>(); + List results = new ArrayList<>(); boolean hasMore = true; while (hasMore) { - hasMore = scanner.next(results); - for (Cell c : results) { + hasMore = scanner.next((List) results); + for (ExtendedCell c : results) { if (MobUtils.isMobReferenceCell(c)) { scannedCount++; } @@ -401,15 +402,15 @@ private int countReferencedMobFiles() throws IOException { scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); InternalScanner scanner = region.getScanner(scan); - List kvs = new 
ArrayList<>(); + List kvs = new ArrayList<>(); boolean hasMore = true; String fileName; Set files = new HashSet<>(); do { kvs.clear(); - hasMore = scanner.next(kvs); + hasMore = scanner.next((List) kvs); for (Cell kv : kvs) { - if (!MobUtils.isMobReferenceCell(kv)) { + if (!MobUtils.isMobReferenceCell((ExtendedCell) kv)) { continue; } if (!MobUtils.hasValidMobRefCellValue(kv)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 124214e46af3..05243bd93a6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -717,7 +717,7 @@ public Message getParam() { } @Override - public CellScanner getCellScanner() { + public ExtendedCellScanner getCellScanner() { return null; } @@ -783,7 +783,7 @@ public int getRemotePort() { } @Override - public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, + public void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowable, String error) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java index 1de0a0d31a33..4ec3e90aad86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java @@ -29,7 +29,7 @@ import java.util.Collections; import java.util.Map; import java.util.Optional; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcCallback; @@ -118,7 +118,7 @@ public Message getParam() { } @Override - public CellScanner getCellScanner() { + public ExtendedCellScanner getCellScanner() { return null; } @@ -182,7 +182,7 @@ public int getRemotePort() { } @Override - public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, + public void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowable, String error) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index fdd5c7d5cf90..b440431f1fb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -28,7 +28,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Get; import 
org.apache.hadoop.hbase.ipc.RpcCall; @@ -184,7 +184,7 @@ public Message getParam() { } @Override - public CellScanner getCellScanner() { + public ExtendedCellScanner getCellScanner() { return null; } @@ -244,7 +244,7 @@ public int getRemotePort() { } @Override - public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, + public void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowable, String error) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java index 5dd0ce8dafb7..bfbeed768554 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java @@ -23,8 +23,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -46,19 +46,19 @@ public class TestReplicationProtobuf { */ @Test public void testGetCellScanner() throws IOException { - List a = new ArrayList<>(); + List a = new ArrayList<>(); KeyValue akv = new KeyValue(Bytes.toBytes("a"), -1L); a.add(akv); // Add a few just to make it less regular. a.add(new KeyValue(Bytes.toBytes("aa"), -1L)); a.add(new KeyValue(Bytes.toBytes("aaa"), -1L)); - List b = new ArrayList<>(); + List b = new ArrayList<>(); KeyValue bkv = new KeyValue(Bytes.toBytes("b"), -1L); a.add(bkv); - List c = new ArrayList<>(); + List c = new ArrayList<>(); KeyValue ckv = new KeyValue(Bytes.toBytes("c"), -1L); c.add(ckv); - List> all = new ArrayList<>(); + List> all = new ArrayList<>(); all.add(a); all.add(b); all.add(c); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index ec9de92e9f25..edabfc51f2bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -251,7 +251,7 @@ public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit) th KeyValue currentKv; scanner.seek(KeyValue.LOWESTKEY); - List> codecIterators = new ArrayList<>(); + List> codecIterators = new ArrayList<>(); for (EncodedDataBlock codec : codecs) { codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum))); } @@ -260,8 +260,8 @@ public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit) th while ((currentKv = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) { // Iterates through key/value pairs ++j; - for (Iterator it : codecIterators) { - Cell c = it.next(); + for (Iterator it : codecIterators) { + ExtendedCell c = it.next(); KeyValue codecKv = 
KeyValueUtil.ensureKeyValue(c); if ( codecKv == null @@ -337,7 +337,7 @@ private int benchmarkEncoder(int previousTotalSize, EncodedDataBlock codec) { for (int itTime = 0; itTime < benchmarkNTimes; ++itTime) { totalSize = 0; - Iterator it; + Iterator it; it = codec.getIterator(HFileBlock.headerSize(useHBaseChecksum)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 601370357744..f33faf8c8e02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -23,7 +23,6 @@ import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -97,7 +96,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, List results = new ArrayList<>(); - while (s.next(results)) + List results = new ArrayList<>(); + while (s.next((List) results)) ; s.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java index e2f9ac2f34ac..52826e2e8e23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -31,11 +31,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -187,10 +187,10 @@ private void testSubSet(CellSet cs) throws Exception { Iterator excludeIter = excludeTail.iterator(); Iterator includeIter = includeTail.iterator(); for (int j = 1 + i; j != ascCells.length; ++j) { - assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j])); + assertEquals(true, PrivateCellUtil.equals(excludeIter.next(), ascCells[j])); } for (int j = i; j != ascCells.length; ++j) { - assertEquals(true, CellUtil.equals(includeIter.next(), ascCells[j])); + assertEquals(true, PrivateCellUtil.equals(includeIter.next(), ascCells[j])); } } assertEquals(NUM_OF_CELLS, cs.tailSet(lowerOuterCell, false).size()); @@ -203,10 +203,10 @@ private void testSubSet(CellSet cs) throws Exception { Iterator excludeIter = excludeHead.iterator(); Iterator includeIter = includeHead.iterator(); for (int j = 0; j != i; ++j) { - assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j])); + assertEquals(true, PrivateCellUtil.equals(excludeIter.next(), ascCells[j])); } for (int j = 0; j != i + 1; ++j) { - assertEquals(true, CellUtil.equals(includeIter.next(), ascCells[j])); + assertEquals(true, PrivateCellUtil.equals(includeIter.next(), ascCells[j])); } } assertEquals(0, cs.headSet(lowerOuterCell, 
false).size()); @@ -217,7 +217,7 @@ private void testSubSet(CellSet cs) throws Exception { assertEquals(NUM_OF_CELLS, sub.size()); Iterator iter = sub.values().iterator(); for (int i = 0; i != ascCells.length; ++i) { - assertEquals(true, CellUtil.equals(iter.next(), ascCells[i])); + assertEquals(true, PrivateCellUtil.equals(iter.next(), ascCells[i])); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 53e183f82590..c9e6cd83ec6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -311,15 +311,15 @@ public void testGetReferencesFromFiles() throws IOException { InternalScanner scanner = (InternalScanner) store.getScanner(scan, scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0); - List results = new ArrayList<>(); - scanner.next(results); + List results = new ArrayList<>(); + scanner.next((List) results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); // Compare Assert.assertEquals(expected.size(), results.size()); for (int i = 0; i < results.size(); i++) { - Cell cell = results.get(i); + ExtendedCell cell = results.get(i); Assert.assertTrue(MobUtils.isMobReferenceCell(cell)); } } @@ -399,15 +399,15 @@ public void testMobCellSizeThreshold() throws IOException { InternalScanner scanner = (InternalScanner) store.getScanner(scan, scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0); - List results = new ArrayList<>(); - scanner.next(results); + List results = new ArrayList<>(); + scanner.next((List) results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); // Compare Assert.assertEquals(expected.size(), results.size()); for (int i = 0; i < results.size(); i++) { - Cell cell = results.get(i); + ExtendedCell cell = results.get(i); // this is not mob reference cell. 
Assert.assertFalse(MobUtils.isMobReferenceCell(cell)); Assert.assertEquals(expected.get(i), results.get(i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index a271920c0150..d9856b40a831 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -82,6 +82,7 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -3780,26 +3781,26 @@ public void testRegionScanner_Next() throws IOException { scan.addFamily(fam2); scan.addFamily(fam4); try (InternalScanner is = region.getScanner(scan)) { - List res = null; + List res = null; // Result 1 - List expected1 = new ArrayList<>(); + List expected1 = new ArrayList<>(); expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null)); expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); res = new ArrayList<>(); - is.next(res); + is.next((List) res); for (int i = 0; i < res.size(); i++) { assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); } // Result 2 - List expected2 = new ArrayList<>(); + List expected2 = new ArrayList<>(); expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null)); expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); res = new ArrayList<>(); - is.next(res); + is.next((List) res); for (int i = 0; i < res.size(); i++) { assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); } @@ -3894,7 +3895,7 @@ public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws I region.flush(true); // Expected - List expected = new ArrayList<>(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -3904,9 +3905,9 @@ public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws I scan.addColumn(fam1, qf1); scan.addColumn(fam1, qf2); scan.readVersions(MAX_VERSIONS); - List actual = new ArrayList<>(); + List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); // Verify result @@ -3968,7 +3969,7 @@ public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() region.put(put); // Expected - List expected = new ArrayList<>(); + List expected = new ArrayList<>(); expected.add(kv14); expected.add(kv13); expected.add(kv12); @@ -3981,9 +3982,9 @@ public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() scan.addColumn(fam1, qf2); int versions = 3; scan.readVersions(versions); - List actual = new ArrayList<>(); + List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); // Verify result @@ -4082,7 +4083,7 @@ public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOExcept region.flush(true); // Expected - List expected = 
new ArrayList<>(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -4091,9 +4092,9 @@ public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOExcept Scan scan = new Scan().withStartRow(row1); scan.addFamily(fam1); scan.readVersions(MAX_VERSIONS); - List actual = new ArrayList<>(); + List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); // Verify result @@ -4207,9 +4208,9 @@ public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws I Scan scan = new Scan().withStartRow(row1); int versions = 3; scan.readVersions(versions); - List actual = new ArrayList<>(); + List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next(actual); + boolean hasNext = scanner.next((List) actual); assertEquals(false, hasNext); // Verify result diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 7b8fcf4e334c..6a410f953fe7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -397,8 +398,8 @@ static class FindBulkHBaseListener extends TestWALActionsListener.DummyWALAction @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { for (Cell cell : logEdit.getCells()) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); - for (Map.Entry entry : kv.toStringMap().entrySet()) { + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); + for (Map.Entry entry : kv.toStringMap().entrySet()) { if (entry.getValue().equals(Bytes.toString(WALEdit.BULK_LOAD))) { found = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index a3fa1bb65db8..13f2101c0040 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -2113,7 +2113,7 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti (StoreScanner) store.getScanner(new Scan(new Get(rowKey1)), quals, seqId + 1); SegmentScanner segmentScanner = getTypeKeyValueScanner(storeScanner, SegmentScanner.class); ExtendedCell resultCell1 = segmentScanner.next(); - assertTrue(CellUtil.equals(resultCell1, originalCell1)); + assertTrue(PrivateCellUtil.equals(resultCell1, originalCell1)); int cell1ChunkId = resultCell1.getChunkId(); assertTrue(cell1ChunkId != ExtendedCell.CELL_NOT_BASED_ON_CHUNK); assertNull(segmentScanner.next()); @@ -2140,12 +2140,12 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti // {@link CellChunkMap#getCell} we could not get the data chunk by chunkId. 
storeScanner = (StoreScanner) store.getScanner(new Scan(new Get(rowKey1)), quals, seqId + 1); segmentScanner = getTypeKeyValueScanner(storeScanner, SegmentScanner.class); - Cell newResultCell1 = segmentScanner.next(); + ExtendedCell newResultCell1 = segmentScanner.next(); assertTrue(newResultCell1 != resultCell1); - assertTrue(CellUtil.equals(newResultCell1, originalCell1)); + assertTrue(PrivateCellUtil.equals(newResultCell1, originalCell1)); - Cell resultCell2 = segmentScanner.next(); - assertTrue(CellUtil.equals(resultCell2, originalCell2)); + ExtendedCell resultCell2 = segmentScanner.next(); + assertTrue(PrivateCellUtil.equals(resultCell2, originalCell2)); assertNull(segmentScanner.next()); segmentScanner.close(); storeScanner.close(); @@ -2569,17 +2569,17 @@ public void testClearSnapshotGetScannerConcurrently() throws Exception { assertTrue(!memStoreLAB.chunks.isEmpty()); assertTrue(!memStoreLAB.isReclaimed()); - Cell cell1 = segmentScanner.next(); - CellUtil.equals(smallCell, cell1); - Cell cell2 = segmentScanner.next(); - CellUtil.equals(largeCell, cell2); + ExtendedCell cell1 = segmentScanner.next(); + PrivateCellUtil.equals(smallCell, cell1); + ExtendedCell cell2 = segmentScanner.next(); + PrivateCellUtil.equals(largeCell, cell2); assertNull(segmentScanner.next()); } else { - List results = new ArrayList<>(); - storeScanner.next(results); + List results = new ArrayList<>(); + storeScanner.next((List) results); assertEquals(2, results.size()); - CellUtil.equals(smallCell, results.get(0)); - CellUtil.equals(largeCell, results.get(1)); + PrivateCellUtil.equals(smallCell, results.get(0)); + PrivateCellUtil.equals(largeCell, results.get(1)); } assertTrue(exceptionRef.get() == null); } finally { @@ -2712,11 +2712,11 @@ public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception { assertTrue(storeScanner.currentScanners.size() == 1); assertTrue(storeScanner.currentScanners.get(0) instanceof StoreFileScanner); - List results = new ArrayList<>(); - storeScanner.next(results); + List results = new ArrayList<>(); + storeScanner.next((List) results); assertEquals(2, results.size()); - CellUtil.equals(smallCell, results.get(0)); - CellUtil.equals(largeCell, results.get(1)); + PrivateCellUtil.equals(smallCell, results.get(0)); + PrivateCellUtil.equals(largeCell, results.get(1)); } finally { if (storeScanner != null) { storeScanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index 824c195fd0f1..3db0b3d3f64c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -1058,8 +1059,8 @@ public void testCacheOnWriteEvictOnClose() throws Exception { readerTwo.loadFileInfo(); StoreFileScanner scannerTwo = getStoreFileScanner(readerTwo, true, true); scannerTwo.seek(KeyValue.LOWESTKEY); - Cell kv1 = null; - Cell kv2 = null; + ExtendedCell kv1 = null; + ExtendedCell kv2 = null; while ((kv1 = scannerOne.next()) != null) { kv2 = scannerTwo.next(); assertTrue(kv1.equals(kv2)); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index f9c332564d94..fdaa19dc3e5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -29,6 +29,7 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -349,8 +350,8 @@ public void testRawScan() throws Exception { s.setRaw(true); s.readAllVersions(); InternalScanner scan = region.getScanner(s); - List kvs = new ArrayList<>(); - scan.next(kvs); + List kvs = new ArrayList<>(); + scan.next((List) kvs); assertEquals(8, kvs.size()); assertTrue(PrivateCellUtil.isDeleteFamily(kvs.get(0))); assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T3); @@ -369,7 +370,7 @@ public void testRawScan() throws Exception { s.setTimeRange(0, 1); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next(kvs); + scan.next((List) kvs); // nothing in this interval, not even delete markers assertTrue(kvs.isEmpty()); @@ -380,7 +381,7 @@ public void testRawScan() throws Exception { s.setTimeRange(0, ts + 2); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next(kvs); + scan.next((List) kvs); assertEquals(4, kvs.size()); assertTrue(PrivateCellUtil.isDeleteFamily(kvs.get(0))); assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T1); @@ -395,7 +396,7 @@ public void testRawScan() throws Exception { s.setTimeRange(ts + 3, ts + 5); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next(kvs); + scan.next((List) kvs); assertEquals(2, kvs.size()); assertArrayEquals(CellUtil.cloneValue(kvs.get(0)), T3); assertTrue(CellUtil.isDelete(kvs.get(1))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 0f55805f6fd8..0db4175916df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; @@ -210,15 +211,15 @@ public void testMultiColumnScanner() throws IOException { } InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList<>(); + List results = new ArrayList<>(); int kvPos = 0; int numResults = 0; String queryInfo = "columns queried: " + qualSet + " (columnBitMask=" + columnBitMask + "), maxVersions=" + maxVersions; - while (scanner.next(results) || results.size() > 0) { - for (Cell kv : results) { + while (scanner.next((List) results) || results.size() > 0) { + for (ExtendedCell kv : results) { while ( kvPos < kvs.size() && !matchesQuery(kvs.get(kvPos), qualSet, maxVersions, lastDelTimeMap) @@ -236,7 +237,7 @@ public void testMultiColumnScanner() throws IOException { "Scanner returned additional key/value: " + kv + ", 
" + queryInfo + deleteInfo + ";", kvPos < kvs.size()); assertTrue("Scanner returned wrong key/value; " + queryInfo + deleteInfo + ";", - PrivateCellUtil.equalsIgnoreMvccVersion(kvs.get(kvPos), (kv))); + PrivateCellUtil.equalsIgnoreMvccVersion(kvs.get(kvPos), kv)); ++kvPos; ++numResults; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java index a3b4c9e0347f..253ca876bd34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java @@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -251,7 +251,7 @@ private FlushResult flushPrimary() throws IOException { private void replicate(Pair, CompletableFuture> pair) throws IOException { Pair params = ReplicationProtobufUtil.buildReplicateWALEntryRequest( + ExtendedCellScanner> params = ReplicationProtobufUtil.buildReplicateWALEntryRequest( pair.getFirst().toArray(new WAL.Entry[0]), secondary.getRegionInfo().getEncodedNameAsBytes(), null, null, null); for (WALEntry entry : params.getFirst().getEntryList()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index 67671fe12fef..e3ba4d85a3a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; @@ -93,7 +93,7 @@ public void testReseek() throws Exception { // Now do reseek with empty KV to position to the beginning of the file KeyValue k = KeyValueUtil.createFirstOnRow(Bytes.toBytes("k2")); s.reseek(k); - Cell kv = s.next(); + ExtendedCell kv = s.next(); kv = s.next(); kv = s.next(); byte[] key5 = Bytes.toBytes("k5"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 45b927c07afa..b04a0054276c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -489,7 +491,7 @@ 
public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; ResultScanner scanner = table.getScanner(new Scan()); Result result = scanner.next(); - KeyValue kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); List tags = TestCoprocessorForTags.tags; assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); assertEquals(1, tags.size()); @@ -504,7 +506,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; scanner = table.getScanner(new Scan()); result = scanner.next(); - kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); tags = TestCoprocessorForTags.tags; assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); assertEquals(2, tags.size()); @@ -529,7 +531,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; scanner = table.getScanner(new Scan().withStartRow(row2)); result = scanner.next(); - kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); tags = TestCoprocessorForTags.tags; assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); assertEquals(1, tags.size()); @@ -549,7 +551,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; scanner = table.getScanner(new Scan().withStartRow(row3)); result = scanner.next(); - kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); tags = TestCoprocessorForTags.tags; assertEquals(1, tags.size()); assertEquals("tag1", Bytes.toString(Tag.cloneValue(tags.get(0)))); @@ -563,7 +565,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; scanner = table.getScanner(new Scan().withStartRow(row3)); result = scanner.next(); - kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); tags = TestCoprocessorForTags.tags; assertEquals(2, tags.size()); // We cannot assume the ordering of tags @@ -587,7 +589,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { TestCoprocessorForTags.checkTagPresence = true; scanner = table.getScanner(new Scan().withStartRow(row4)); result = scanner.next(); - kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q)); + kv = KeyValueUtil.ensureKeyValue((ExtendedCell) result.getColumnLatestCell(f, q)); tags = TestCoprocessorForTags.tags; assertEquals(1, tags.size()); assertEquals("tag2", Bytes.toString(Tag.cloneValue(tags.get(0)))); @@ -653,7 +655,7 @@ private void updateMutationAddingTags(final Mutation m) { if (attribute != null) { for (List edits : m.getFamilyCellMap().values()) { for (Cell cell : edits) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); if (cf == null) { cf = CellUtil.cloneFamily(kv); } @@ -696,9 +698,9 @@ public boolean postScannerNext(ObserverContext e, if (results.size() > 0) { // Check tag presence in the 1st cell in 1st 
Result Result result = results.get(0); - CellScanner cellScanner = result.cellScanner(); + ExtendedCellScanner cellScanner = result.cellScanner(); if (cellScanner.advance()) { - Cell cell = cellScanner.current(); + ExtendedCell cell = cellScanner.current(); tags = PrivateCellUtil.getTags(cell); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java index 5359dec2e64d..028de5066c80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java @@ -203,7 +203,9 @@ public Scanner(KeyValue... kvs) { @Override public boolean next(List result, ScannerContext scannerContext) throws IOException { - if (kvs.isEmpty()) return false; + if (kvs.isEmpty()) { + return false; + } result.add(kvs.remove(0)); return !kvs.isEmpty(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java index 484206ad8387..33efd51368bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -219,7 +220,7 @@ public void prePut(final ObserverContext e, final if (attribute != null) { for (List edits : put.getFamilyCellMap().values()) { for (Cell cell : edits) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); if (cf == null) { cf = CellUtil.cloneFamily(kv); } @@ -257,7 +258,7 @@ public void postGetOp(ObserverContext e, Get get, // Check tag presence in the 1st cell in 1st Result if (!results.isEmpty()) { Cell cell = results.get(0); - TAGS = PrivateCellUtil.getTags(cell); + TAGS = PrivateCellUtil.getTags((ExtendedCell) cell); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java index 0b9a9d663e5a..f3fd0878f9a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; @@ -273,7 +274,9 @@ public boolean evaluate(Cell cell) throws IOException { final List authLabelsFinal = authLabels; return new VisibilityExpEvaluator() { @Override - public boolean evaluate(Cell cell) throws IOException { + public boolean evaluate(Cell c) 
throws IOException { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; boolean visibilityTagPresent = false; // Save an object allocation where we can if (cell.getTagsLength() > 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index dc313d414ae8..10ca33e38725 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -292,7 +293,7 @@ protected static void doAssert(byte[] row, String visTag) throws Exception { (Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, row.length)) ) { - List tags = PrivateCellUtil.getTags(cell); + List tags = PrivateCellUtil.getTags((ExtendedCell) cell); for (Tag tag : tags) { if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) { assertEquals(visTag, Tag.getValueAsString(tag)); @@ -418,14 +419,15 @@ public void prePut(ObserverContext e, Put m, WALEd if (attribute != null) { for (List edits : m.getFamilyCellMap().values()) { for (Cell cell : edits) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); if (cf == null) { cf = CellUtil.cloneFamily(kv); } Tag tag = new ArrayBackedTag((byte) NON_VIS_TAG_TYPE, attribute); - List tagList = new ArrayList<>(PrivateCellUtil.getTags(cell).size() + 1); + List tagList = + new ArrayList<>(PrivateCellUtil.getTags((ExtendedCell) cell).size() + 1); tagList.add(tag); - tagList.addAll(PrivateCellUtil.getTags(cell)); + tagList.addAll(PrivateCellUtil.getTags((ExtendedCell) cell)); Cell newcell = PrivateCellUtil.createCell(kv, tagList); ((List) updatedCells).add(newcell); } @@ -452,7 +454,7 @@ public void postGetOp(ObserverContext e, Get get, // Check tag presence in the 1st cell in 1st Result if (!results.isEmpty()) { Cell cell = results.get(0); - tags = PrivateCellUtil.getTags(cell); + tags = PrivateCellUtil.getTags((ExtendedCell) cell); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java index 20f621fba61a..d017db6eb9bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java @@ -144,7 +144,8 @@ public static void verifyTags(Table table) throws IOException { ResultScanner s = table.getScanner(new Scan()); for (Result r : s) { for (Cell c : r.listCells()) { - Optional tag = PrivateCellUtil.getTag(c, TagType.MOB_TABLE_NAME_TAG_TYPE); + Optional tag = + PrivateCellUtil.getTag((ExtendedCell) c, TagType.MOB_TABLE_NAME_TAG_TYPE); if (!tag.isPresent()) { fail(c.toString() + " has null tag"); continue; diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index a02f944e12a7..ee060bd53878 
100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; @@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.OnlineLogRecord; import org.apache.hadoop.hbase.client.OperationWithAttributes; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -120,7 +122,7 @@ @InterfaceAudience.Private public final class ThriftUtilities { - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; + private final static ExtendedCell[] EMPTY_CELL_ARRAY = new ExtendedCell[0]; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); @@ -220,14 +222,14 @@ public static List getsFromThrift(List in) throws IOException { * @return converted result, returns an empty result if the input is null */ public static TResult resultFromHBase(Result in) { - Cell[] raw = in.rawCells(); + ExtendedCell[] raw = PackagePrivateFieldAccessor.getExtendedRawCells(in); TResult out = new TResult(); byte[] row = in.getRow(); if (row != null) { out.setRow(in.getRow()); } List columnValues = new ArrayList<>(raw.length); - for (Cell kv : raw) { + for (ExtendedCell kv : raw) { TColumnValue col = new TColumnValue(); col.setFamily(CellUtil.cloneFamily(kv)); col.setQualifier(CellUtil.cloneQualifier(kv)); @@ -1309,9 +1311,10 @@ public static TPut putFromHBase(Put in) { if (in.getDurability() != Durability.USE_DEFAULT) { out.setDurability(durabilityFromHBase(in.getDurability())); } - for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : PackagePrivateFieldAccessor + .getExtendedFamilyCellMap(in).entrySet()) { byte[] family = entry.getKey(); - for (Cell cell : entry.getValue()) { + for (ExtendedCell cell : entry.getValue()) { TColumnValue columnValue = new TColumnValue(); columnValue.setFamily(family).setQualifier(CellUtil.cloneQualifier(cell)) .setType(cell.getType().getCode()).setTimestamp(cell.getTimestamp()) @@ -1372,9 +1375,10 @@ public static TAppend appendFromHBase(Append in) throws IOException { if (in.getDurability() != Durability.USE_DEFAULT) { out.setDurability(durabilityFromHBase(in.getDurability())); } - for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : PackagePrivateFieldAccessor + .getExtendedFamilyCellMap(in).entrySet()) { byte[] family = entry.getKey(); - for (Cell cell : entry.getValue()) { + for (ExtendedCell cell : entry.getValue()) { TColumnValue columnValue = new TColumnValue(); columnValue.setFamily(family).setQualifier(CellUtil.cloneQualifier(cell)) .setType(cell.getType().getCode()).setTimestamp(cell.getTimestamp()) From f95ac7a9c409b4d56e53e9272481ac909e52f177 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 11 Jul 2024 22:37:35 +0800 Subject: [PATCH 456/514] HBASE-28722 Should wipe out all the output directories before unstash in nightly job (#6070) 
Signed-off-by: Xin Sun --- dev-support/Jenkinsfile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 946525606145..f93408d6fd3a 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -954,13 +954,25 @@ pipeline { always { script { try { + sh "printenv" + // wipe out all the output directories before unstashing + sh''' + echo "Clean up result directories" + rm -rf ${OUTPUT_DIR_RELATIVE_GENERAL} + rm -rf ${OUTPUT_DIR_RELATIVE_JDK8_HADOOP2} + rm -rf ${OUTPUT_DIR_RELATIVE_JDK8_HADOOP3} + rm -rf ${OUTPUT_DIR_RELATIVE_JDK11_HADOOP3} + rm -rf ${OUTPUT_DIR_RELATIVE_JDK17_HADOOP3} + rm -rf output-srctarball + rm -rf output-integration + ''' unstash 'general-result' unstash 'jdk8-hadoop2-result' unstash 'jdk8-hadoop3-result' unstash 'jdk11-hadoop3-result' unstash 'jdk17-hadoop3-result' unstash 'srctarball-result' - sh "printenv" + def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile", "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/commentfile", "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP3}/commentfile", From dc707937b63fcabad39334930a8b32da5422f8e1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 12 Jul 2024 18:55:56 +0800 Subject: [PATCH 457/514] HBASE-28713 Add 2.6.x in hadoop support matrix in our ref guide (#6073) Signed-off-by: Bryan Beaudreault < bbeaudreault@apache.org> --- .../asciidoc/_chapters/configuration.adoc | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 8a732fd0e535..5dbc09b84a25 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -342,17 +342,18 @@ link:https://hadoop.apache.org/cve_list.html[CVEs] so we drop the support in new .Hadoop version support matrix for active release lines -[cols="1,1*^.^", options="header"] +[cols="1,2*^.^", options="header"] |=== -| | HBase-2.5.x -|Hadoop-2.10.[0-1] | icon:times-circle[role="red"] -|Hadoop-2.10.2+ | icon:check-circle[role="green"] -|Hadoop-3.1.0 | icon:times-circle[role="red"] -|Hadoop-3.1.1+ | icon:times-circle[role="red"] -|Hadoop-3.2.[0-2] | icon:times-circle[role="red"] -|Hadoop-3.2.3+ | icon:check-circle[role="green"] -|Hadoop-3.3.[0-1] | icon:times-circle[role="red"] -|Hadoop-3.3.2+ | icon:check-circle[role="green"] +| | HBase-2.5.x | HBase-2.6.x +|Hadoop-2.10.[0-1] | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-2.10.2+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] +|Hadoop-3.1.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.1.1+ | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.2.[0-2] | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.2.3+ | icon:check-circle[role="green"] | icon:times-circle[role="red"] +|Hadoop-3.3.[0-1] | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.3.[2-4] | icon:check-circle[role="green"] | icon:times-circle[role="red"] +|Hadoop-3.3.5+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] |=== .Hadoop version support matrix for EOM 2.3+ release lines From e45e2383b865c765d464faac41994a3e61eca7e5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 13 Jul 2024 22:26:07 +0800 Subject: [PATCH 458/514] HBASE-28723 [JDK17] TestSecureIPC fails under JDK17 (#6071) Signed-off-by: Yi Mei --- pom.xml | 13 ++++++++----- 1 file changed, 8 
insertions(+), 5 deletions(-) diff --git a/pom.xml b/pom.xml index a35307149d94..044a8c927883 100644 --- a/pom.xml +++ b/pom.xml @@ -990,10 +990,12 @@ -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.opentelemetry.context.enableStrictContext=true -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported @@ -1011,7 +1013,8 @@ --add-exports java.base/sun.net.util=ALL-UNNAMED --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED --add-opens java.base/sun.security.x509=ALL-UNNAMED - --add-opens java.base/sun.security.util=ALL-UNNAMED + --add-opens java.base/sun.security.util=ALL-UNNAMED + --add-opens java.base/java.net=ALL-UNNAMED ${hbase-surefire.argLine} @{jacocoArgLine} 1.5.1 From e9ea1b83796d9cfee524c20c25645291e8cde8d5 Mon Sep 17 00:00:00 2001 From: lupeng Date: Mon, 15 Jul 2024 11:39:07 +0800 Subject: [PATCH 459/514] HBASE-28704 The expired snapshot can be read by CopyTable or ExportSnapshot (#6047) Signed-off-by: Duo Zhang Signed-off-by: Liangjun He --- .../hadoop/hbase/snapshot/ExportSnapshot.java | 48 ++++++---- .../hadoop/hbase/mapreduce/TestCopyTable.java | 42 ++++++++ .../hbase/snapshot/TestExportSnapshot.java | 96 +++++++++++++++---- .../hbase/snapshot/RestoreSnapshotHelper.java | 7 ++ .../snapshot/TestRestoreSnapshotHelper.java | 41 ++++++++ 5 files changed, 194 insertions(+), 40 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 4e0c54b718bb..d10ff7f9b3ea 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -79,6 +79,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; @@ -138,9 +139,9 @@ static final class Options { static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, "Do not verify checksum, use name+length only."); static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, - "Do not verify the integrity of the exported snapshot."); - static final Option NO_SOURCE_VERIFY = - new Option(null, "no-source-verify", false, "Do not verify the source of the snapshot."); + "Do not verify the exported snapshot's expiration status and integrity."); + static final Option NO_SOURCE_VERIFY = new Option(null, "no-source-verify", false, + "Do not verify the source snapshot's expiration status and integrity."); static final Option OVERWRITE = new Option(null, "overwrite", false, "Rewrite the snapshot manifest if already exists."); static final Option CHUSER = @@ -936,13 +937,17 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, final Strin } } - private void verifySnapshot(final Configuration baseConf, final FileSystem fs, final Path rootDir, - final Path snapshotDir) throws IOException { + private void verifySnapshot(final SnapshotDescription snapshotDesc, final Configuration baseConf, + final FileSystem fs, final Path rootDir, final Path 
snapshotDir) throws IOException { // Update the conf with the current root dir, since may be a different cluster Configuration conf = new Configuration(baseConf); CommonFSUtils.setRootDir(conf, rootDir); CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf)); - SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + boolean isExpired = SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), + snapshotDesc.getCreationTime(), EnvironmentEdgeManager.currentTime()); + if (isExpired) { + throw new SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshotDesc)); + } SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, snapshotDesc); } @@ -1044,14 +1049,14 @@ public int doWork() throws IOException { if (snapshotName == null) { System.err.println("Snapshot name not provided."); LOG.error("Use -h or --help for usage instructions."); - return 0; + return EXIT_FAILURE; } if (outputRoot == null) { System.err .println("Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided."); LOG.error("Use -h or --help for usage instructions."); - return 0; + return EXIT_FAILURE; } if (targetName == null) { @@ -1079,11 +1084,14 @@ public int doWork() throws IOException { LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir); + // throw CorruptedSnapshotException if we can't read the snapshot info. + SnapshotDescription sourceSnapshotDesc = + SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir); + // Verify snapshot source before copying files if (verifySource) { - LOG.info("Verify snapshot source, inputFs={}, inputRoot={}, snapshotDir={}.", - inputFs.getUri(), inputRoot, snapshotDir); - verifySnapshot(srcConf, inputFs, inputRoot, snapshotDir); + LOG.info("Verify the source snapshot's expiration status and integrity."); + verifySnapshot(sourceSnapshotDesc, srcConf, inputFs, inputRoot, snapshotDir); } // Find the necessary directory which need to change owner and group @@ -1104,12 +1112,12 @@ public int doWork() throws IOException { if (overwrite) { if (!outputFs.delete(outputSnapshotDir, true)) { System.err.println("Unable to remove existing snapshot directory: " + outputSnapshotDir); - return 1; + return EXIT_FAILURE; } } else { System.err.println("The snapshot '" + targetName + "' already exists in the destination: " + outputSnapshotDir); - return 1; + return EXIT_FAILURE; } } @@ -1120,7 +1128,7 @@ public int doWork() throws IOException { if (!outputFs.delete(snapshotTmpDir, true)) { System.err .println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir); - return 1; + return EXIT_FAILURE; } } else { System.err @@ -1129,7 +1137,7 @@ public int doWork() throws IOException { .println("Please check " + snapshotTmpDir + ". 
If the snapshot has completed, "); System.err .println("consider removing " + snapshotTmpDir + " by using the -overwrite option"); - return 1; + return EXIT_FAILURE; } } } @@ -1208,19 +1216,21 @@ public int doWork() throws IOException { // Step 4 - Verify snapshot integrity if (verifyTarget) { - LOG.info("Verify snapshot integrity"); - verifySnapshot(destConf, outputFs, outputRoot, outputSnapshotDir); + LOG.info("Verify the exported snapshot's expiration status and integrity."); + SnapshotDescription targetSnapshotDesc = + SnapshotDescriptionUtils.readSnapshotInfo(outputFs, outputSnapshotDir); + verifySnapshot(targetSnapshotDesc, destConf, outputFs, outputRoot, outputSnapshotDir); } LOG.info("Export Completed: " + targetName); - return 0; + return EXIT_SUCCESS; } catch (Exception e) { LOG.error("Snapshot export failed", e); if (!skipTmp) { outputFs.delete(snapshotTmpDir, true); } outputFs.delete(outputSnapshotDir, true); - return 1; + return EXIT_FAILURE; } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 11e377b199f4..5c3e9b65079d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -19,23 +19,31 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -210,6 +218,40 @@ public void testLoadingSnapshotToTable() throws Exception { testCopyTableBySnapshot("testLoadingSnapshotToTable", false, false); } + @Test + public void testLoadingTtlExpiredSnapshotToTable() throws Exception { + String tablePrefix = "testLoadingExpiredSnapshotToTable"; + TableName table1 = TableName.valueOf(tablePrefix + 1); + TableName table2 = TableName.valueOf(tablePrefix + 2); + Table t1 = createTable(table1, FAMILY_A, false); + createTable(table2, FAMILY_A, false); + loadData(t1, FAMILY_A, Bytes.toBytes("qualifier")); + String snapshot = tablePrefix + "_snapshot"; + Map properties = new HashMap<>(); + properties.put("TTL", 10); + SnapshotDescription snapshotDescription = new SnapshotDescription(snapshot, table1, + 
SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties); + TEST_UTIL.getAdmin().snapshot(snapshotDescription); + boolean isExist = + TEST_UTIL.getAdmin().listSnapshots().stream().anyMatch(ele -> snapshot.equals(ele.getName())); + assertTrue(isExist); + int retry = 6; + while ( + !SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(), + snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()) && retry > 0 + ) { + retry--; + Thread.sleep(10 * 1000); + } + boolean isExpiredSnapshot = + SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(), + snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()); + assertTrue(isExpiredSnapshot); + String[] args = new String[] { "--snapshot", "--new.name=" + table2, "--bulkload", snapshot }; + assertThrows(SnapshotTTLExpiredException.class, + () -> runCopy(TEST_UTIL.getConfiguration(), args)); + } + @Test public void tsetLoadingSnapshotToMobTable() throws Exception { testCopyTableBySnapshot("testLoadingSnapshotToMobTable", false, true); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 813da956799e..133737bb3972 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -44,12 +44,14 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -281,6 +283,39 @@ public void testExportWithResetTtl() throws Exception { } } + @Test + public void testExportExpiredSnapshot() throws Exception { + String name = "testExportExpiredSnapshot"; + TableName tableName = TableName.valueOf(name); + String snapshotName = "snapshot-" + name; + createTable(tableName); + SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY); + Map properties = new HashMap<>(); + properties.put("TTL", 10); + org.apache.hadoop.hbase.client.SnapshotDescription snapshotDescription = + new org.apache.hadoop.hbase.client.SnapshotDescription(snapshotName, tableName, + SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties); + admin.snapshot(snapshotDescription); + boolean isExist = + admin.listSnapshots().stream().anyMatch(ele -> snapshotName.equals(ele.getName())); + assertTrue(isExist); + int retry = 6; + while ( + !SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(), + snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()) && retry > 0 + ) { + retry--; + Thread.sleep(10 * 1000); + } + boolean isExpiredSnapshot = + SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(), + snapshotDescription.getCreationTime(), 
EnvironmentEdgeManager.currentTime()); + assertTrue(isExpiredSnapshot); + int res = runExportSnapshot(TEST_UTIL.getConfiguration(), snapshotName, snapshotName, + TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false, false, true, true); + assertTrue(res == AbstractHBaseTool.EXIT_FAILURE); + } + private void testExportFileSystemState(final TableName tableName, final String snapshotName, final String targetName, int filesExpected) throws Exception { testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, @@ -312,29 +347,10 @@ protected static void testExportFileSystemState(final Configuration conf, FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); - LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, - rawTgtDir, srcFs.getUri(), srcDir); - List opts = new ArrayList<>(); - opts.add("--snapshot"); - opts.add(snapshotName); - opts.add("--copy-to"); - opts.add(tgtDir.toString()); - if (!targetName.equals(snapshotName)) { - opts.add("--target"); - opts.add(targetName); - } - if (overwrite) { - opts.add("--overwrite"); - } - if (resetTtl) { - opts.add("--reset-ttl"); - } - if (!checksumVerify) { - opts.add("--no-checksum-verify"); - } // Export Snapshot - int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()])); + int res = runExportSnapshot(conf, snapshotName, targetName, srcDir, rawTgtDir, overwrite, + resetTtl, checksumVerify, true, true); assertEquals("success " + success + ", res=" + res, success ? 0 : 1, res); if (!success) { final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName); @@ -467,4 +483,42 @@ private static void removeExportDir(final Path path) throws IOException { FileSystem fs = FileSystem.get(path.toUri(), new Configuration()); fs.delete(path, true); } + + private static int runExportSnapshot(final Configuration conf, final String sourceSnapshotName, + final String targetSnapshotName, final Path srcDir, Path rawTgtDir, final boolean overwrite, + final boolean resetTtl, final boolean checksumVerify, final boolean noSourceVerify, + final boolean noTargetVerify) throws Exception { + FileSystem tgtFs = rawTgtDir.getFileSystem(conf); + FileSystem srcFs = srcDir.getFileSystem(conf); + Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); + LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, + rawTgtDir, srcFs.getUri(), srcDir); + List opts = new ArrayList<>(); + opts.add("--snapshot"); + opts.add(sourceSnapshotName); + opts.add("--copy-to"); + opts.add(tgtDir.toString()); + if (!targetSnapshotName.equals(sourceSnapshotName)) { + opts.add("--target"); + opts.add(targetSnapshotName); + } + if (overwrite) { + opts.add("--overwrite"); + } + if (resetTtl) { + opts.add("--reset-ttl"); + } + if (!checksumVerify) { + opts.add("--no-checksum-verify"); + } + if (!noSourceVerify) { + opts.add("--no-source-verify"); + } + if (!noTargetVerify) { + opts.add("--no-target-verify"); + } + + // Export Snapshot + return run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()])); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 8395456cd76e..377cfda03a7b 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.security.access.TablePermission; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.hadoop.hbase.util.Pair; @@ -877,6 +878,12 @@ public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, File Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + // check if the snapshot is expired. + boolean isExpired = SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), + snapshotDesc.getCreationTime(), EnvironmentEdgeManager.currentTime()); + if (isExpired) { + throw new SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshotDesc)); + } SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); MonitoredTask status = TaskMonitor.get() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index c18c07fc12e9..2b8fa85119b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -18,10 +18,13 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; @@ -32,6 +35,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -45,6 +49,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.junit.After; @@ -177,6 +182,42 @@ public void testSkipReplayAndUpdateSeqId() throws Exception { } } + @Test + public void testCopyExpiredSnapshotForScanner() throws IOException, InterruptedException { + rootDir = TEST_UTIL.getDefaultRootDirPath(); + CommonFSUtils.setRootDir(conf, rootDir); + TableName tableName = TableName.valueOf("testCopyExpiredSnapshotForScanner"); + String snapshotName = tableName.getNameAsString() + "-snapshot"; + Path restoreDir = new Path("/hbase/.tmp-expired-snapshot/copySnapshotDest"); + // create table and put some data into the table + byte[] columnFamily = Bytes.toBytes("A"); + Table table = TEST_UTIL.createTable(tableName, 
columnFamily); + TEST_UTIL.loadTable(table, columnFamily); + // create snapshot with ttl = 10 sec + Map properties = new HashMap<>(); + properties.put("TTL", 10); + org.apache.hadoop.hbase.client.SnapshotDescription snapshotDesc = + new org.apache.hadoop.hbase.client.SnapshotDescription(snapshotName, tableName, + SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties); + TEST_UTIL.getAdmin().snapshot(snapshotDesc); + boolean isExist = TEST_UTIL.getAdmin().listSnapshots().stream() + .anyMatch(ele -> snapshotName.equals(ele.getName())); + assertTrue(isExist); + int retry = 6; + while ( + !SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), + snapshotDesc.getCreationTime(), EnvironmentEdgeManager.currentTime()) && retry > 0 + ) { + retry--; + Thread.sleep(10 * 1000); + } + boolean isExpiredSnapshot = SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDesc.getTtl(), + snapshotDesc.getCreationTime(), EnvironmentEdgeManager.currentTime()); + assertTrue(isExpiredSnapshot); + assertThrows(SnapshotTTLExpiredException.class, () -> RestoreSnapshotHelper + .copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName)); + } + protected void createTableAndSnapshot(TableName tableName, String snapshotName) throws IOException { byte[] column = Bytes.toBytes("A"); From 386ecea0a9ceb2f7f455be4f1184e156ecac6431 Mon Sep 17 00:00:00 2001 From: lupeng Date: Mon, 15 Jul 2024 11:45:49 +0800 Subject: [PATCH 460/514] HBASE-28727 SteppingSplitPolicy may not work when table enables region replication (#6077) Signed-off-by: Duo Zhang --- ...creasingToUpperBoundRegionSplitPolicy.java | 6 ++- .../regionserver/TestRegionSplitPolicy.java | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 7a0393fc0aee..0386bf8245e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.yetus.audience.InterfaceAudience; @@ -97,7 +98,10 @@ private int getCountOfCommonTableRegions() { int tableRegionsCount = 0; try { List hri = rss.getRegions(tablename); - tableRegionsCount = hri == null || hri.isEmpty() ? 
0 : hri.size(); + if (hri != null && !hri.isEmpty()) { + tableRegionsCount = (int) hri.stream() + .filter(r -> r.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID).count(); + } } catch (IOException e) { LOG.debug("Failed getOnlineRegions " + tablename, e); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index 0c14c9aa812b..404935d3e949 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -154,6 +154,46 @@ public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException { assertWithinJitter(maxSplitSize, policy.getSizeToCheck(0)); } + @Test + public void testSteppingSplitPolicyWithRegionReplication() throws IOException { + conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, SteppingSplitPolicy.class.getName()); + + RegionServerServices rss = mock(RegionServerServices.class); + doReturn(rss).when(mockRegion).getRegionServerServices(); + + long maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE; + TableDescriptor td = + TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(maxFileSize).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); + assertEquals(td.getMaxFileSize(), maxFileSize); + + List storefiles = new ArrayList<>(); + HStore mockStore = mock(HStore.class); + long flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + long exceedSize = flushSize * 2 + 1; + doReturn(exceedSize).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); + storefiles.add(mockStore); + doReturn(storefiles).when(mockRegion).getStores(); + + List regions = new ArrayList<>(); + HRegion r1 = mock(HRegion.class); + RegionInfo regionInfo1 = mock(RegionInfo.class); + doReturn(regionInfo1).when(r1).getRegionInfo(); + doReturn(RegionInfo.DEFAULT_REPLICA_ID).when(regionInfo1).getReplicaId(); + HRegion r2 = mock(HRegion.class); + RegionInfo regionInfo2 = mock(RegionInfo.class); + doReturn(regionInfo2).when(r2).getRegionInfo(); + doReturn(1).when(regionInfo2).getReplicaId(); + regions.add(r1); + regions.add(r2); + doReturn(regions).when(rss).getRegions(td.getTableName()); + + SteppingSplitPolicy policy = (SteppingSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + assertTrue(policy.shouldSplit()); + } + @Test public void testIsExceedSize() throws IOException { // Configure SteppingAllStoresSizeSplitPolicy as our split policy From 2b673bd429a4bcbdacd7e44716ccb324398affe6 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Mon, 15 Jul 2024 11:00:10 +0100 Subject: [PATCH 461/514] HBASE-28724 BucketCache.notifyFileCachingCompleted may throw IllegalMonitorStateException (#6074) Signed-off-by: Peter Somogyi --- .../hbase/io/hfile/bucket/BucketCache.java | 1 + .../hadoop/hbase/io/hfile/CacheTestUtils.java | 25 ++++++--- .../io/hfile/bucket/TestBucketCache.java | 53 +++++++++++++++++++ 3 files changed, 71 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 8ee0b6b98ada..5816b8ff1602 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -2108,6 +2108,7 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d for (ReentrantReadWriteLock lock : locks) { lock.readLock().unlock(); } + locks.clear(); LOG.debug("There are still blocks pending caching for file {}. Will sleep 100ms " + "and try the verification again.", fileName); Thread.sleep(100); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index 262408e91a82..848f33bb9c3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -32,6 +32,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; @@ -275,6 +276,10 @@ public BlockType getBlockType() { } public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) { + return generateBlocksForPath(blockSize, numBlocks, null); + } + + public static HFileBlockPair[] generateBlocksForPath(int blockSize, int numBlocks, Path path) { HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks]; Random rand = ThreadLocalRandom.current(); HashSet usedStrings = new HashSet<>(); @@ -299,16 +304,20 @@ public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) prevBlockOffset, ByteBuff.wrap(cachedBuffer), HFileBlock.DONT_FILL_HEADER, blockSize, onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, meta, ByteBuffAllocator.HEAP); - - String strKey; - /* No conflicting keys */ - strKey = Long.toString(rand.nextLong()); - while (!usedStrings.add(strKey)) { - strKey = Long.toString(rand.nextLong()); + String key = null; + long offset = 0; + if (path != null) { + key = path.getName(); + offset = i * blockSize; + } else { + /* No conflicting keys */ + key = Long.toString(rand.nextLong()); + while (!usedStrings.add(key)) { + key = Long.toString(rand.nextLong()); + } } - returnedBlocks[i] = new HFileBlockPair(); - returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0); + returnedBlocks[i].blockName = new BlockCacheKey(key, offset); returnedBlocks[i].block = generated; } return returnedBlocks; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 6a9b5bf382a6..78a781994e83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -890,4 +890,57 @@ public void testBlockAdditionWaitWhenCache() throws Exception { HBASE_TESTING_UTILITY.cleanupTestDir(); } } + + @Test + public void testNotifyFileCachingCompletedSuccess() throws Exception { + BucketCache bucketCache = null; + try { + Path filePath = + new Path(HBASE_TESTING_UTILITY.getDataTestDir(), "testNotifyFileCachingCompletedSuccess"); + bucketCache = testNotifyFileCachingCompleted(filePath, 10); + assertTrue(bucketCache.fullyCachedFiles.containsKey(filePath.getName())); + } finally { + if (bucketCache != null) { + 
bucketCache.shutdown(); + } + HBASE_TESTING_UTILITY.cleanupTestDir(); + } + } + + @Test + public void testNotifyFileCachingCompletedNotAllCached() throws Exception { + BucketCache bucketCache = null; + try { + Path filePath = new Path(HBASE_TESTING_UTILITY.getDataTestDir(), + "testNotifyFileCachingCompletedNotAllCached"); + // Deliberately passing more blocks than we have created to test that + // notifyFileCachingCompleted will not consider the file fully cached + bucketCache = testNotifyFileCachingCompleted(filePath, 12); + assertFalse(bucketCache.fullyCachedFiles.containsKey(filePath.getName())); + } finally { + if (bucketCache != null) { + bucketCache.shutdown(); + } + HBASE_TESTING_UTILITY.cleanupTestDir(); + } + } + + private BucketCache testNotifyFileCachingCompleted(Path filePath, int totalBlocks) + throws Exception { + final Path dataTestDir = createAndGetTestDir(); + String ioEngineName = "file:" + dataTestDir + "/bucketNoRecycler.cache"; + BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, + constructedBlockSizes, 1, 1, null); + long usedByteSize = bucketCache.getAllocator().getUsedSize(); + assertEquals(0, usedByteSize); + HFileBlockPair[] hfileBlockPairs = + CacheTestUtils.generateBlocksForPath(constructedBlockSize, 10, filePath); + // Add blocks + for (HFileBlockPair hfileBlockPair : hfileBlockPairs) { + bucketCache.cacheBlock(hfileBlockPair.getBlockName(), hfileBlockPair.getBlock(), false, true); + } + bucketCache.notifyFileCachingCompleted(filePath, totalBlocks, totalBlocks, + totalBlocks * constructedBlockSize); + return bucketCache; + } } From 50a495f735b88f933311dd9fff7d6ce9364298a6 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 16 Jul 2024 21:50:56 +0800 Subject: [PATCH 462/514] HBASE-28683 Only allow one TableProcedureInterface for a single table to run at the same time for some special procedure types (#6046) Signed-off-by: Viraj Jasani --- .../hadoop/hbase/procedure2/Procedure.java | 2 +- .../hbase/procedure2/ProcedureExecutor.java | 22 ++++ .../procedure/MasterProcedureScheduler.java | 69 +++++++++-- .../procedure/TableProcedureWaitingQueue.java | 117 ++++++++++++++++++ .../hbase/master/procedure/TableQueue.java | 2 +- ...ocedureSchedulerPerformanceEvaluation.java | 1 + .../TestMasterProcedureScheduler.java | 42 +++++-- ...stMasterProcedureSchedulerConcurrency.java | 5 +- 8 files changed, 238 insertions(+), 22 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureWaitingQueue.java diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 7bd64fd9944d..7b6e7ab8e983 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -903,7 +903,7 @@ protected synchronized void setExecuted() { this.wasExecuted = true; } - protected synchronized boolean wasExecuted() { + public synchronized boolean wasExecuted() { return wasExecuted; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 0edfac8e8840..a0b43d4d8486 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -496,6 +496,28 @@ private void processWaitingTimeoutProcedures(List> waiti private void pushProceduresAfterLoad(List> runnableList, List> failedList) { failedList.forEach(scheduler::addBack); + // Put the procedures which have been executed first + // For table procedures, to prevent concurrent modifications, we only allow one procedure to run + // for a single table at the same time, this is done via inserting a waiting queue before + // actually add the procedure to run queue. So when loading here, we should add the procedures + // which have been executed first, otherwise another procedure which was in the waiting queue + // before restarting may be added to run queue first and still cause concurrent modifications. + // See HBASE-28263 for the reason why we need this + runnableList.sort((p1, p2) -> { + if (p1.wasExecuted()) { + if (p2.wasExecuted()) { + return Long.compare(p1.getProcId(), p2.getProcId()); + } else { + return -1; + } + } else { + if (p2.wasExecuted()) { + return 1; + } else { + return Long.compare(p1.getProcId(), p2.getProcId()); + } + } + }); runnableList.forEach(p -> { p.afterReplay(getEnvironment()); if (!p.hasParent()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index a5ef7c5d9239..9f81019606e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -19,7 +19,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.function.Function; import java.util.function.Supplier; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -113,9 +115,17 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { private MetaQueue metaMap = null; private GlobalQueue globalMap = null; + private final Function> procedureRetriever; private final SchemaLocking locking; + // To prevent multiple Create/Modify/Disable/Enable table procedure run at the same time, we will + // keep table procedure in this queue first before actually enqueuing it to tableQueue + // Seee HBASE-28683 for more details + private final Map tableProcsWaitingEnqueue = + new HashMap<>(); + public MasterProcedureScheduler(Function> procedureRetriever) { + this.procedureRetriever = procedureRetriever; locking = new SchemaLocking(procedureRetriever); } @@ -124,11 +134,26 @@ public void yield(final Procedure proc) { push(proc, false, true); } + private boolean shouldWaitBeforeEnqueuing(TableProcedureInterface proc) { + return TableQueue.requireTableExclusiveLock(proc); + } + @Override protected void enqueue(final Procedure proc, final boolean addFront) { if (isMetaProcedure(proc)) { doAdd(metaRunQueue, getMetaQueue(), proc, addFront); } else if (isTableProcedure(proc)) { + TableProcedureInterface tableProc = (TableProcedureInterface) proc; + if (shouldWaitBeforeEnqueuing(tableProc)) { + TableProcedureWaitingQueue waitingQueue = tableProcsWaitingEnqueue.computeIfAbsent( + tableProc.getTableName(), k -> new TableProcedureWaitingQueue(procedureRetriever)); + if (!waitingQueue.procedureSubmitted(proc)) { + // there is a table procedure for this table already enqueued, waiting + LOG.debug("There is 
already a procedure running for table {}, added {} to waiting queue", + tableProc.getTableName(), proc); + return; + } + } doAdd(tableRunQueue, getTableQueue(getTableName(proc)), proc, addFront); } else if (isServerProcedure(proc)) { ServerProcedureInterface spi = (ServerProcedureInterface) proc; @@ -277,6 +302,7 @@ private void clearQueue() { // Remove Tables clear(tableMap, tableRunQueue, TABLE_QUEUE_KEY_COMPARATOR); tableMap = null; + tableProcsWaitingEnqueue.clear(); // Remove Peers clear(peerMap, peerRunQueue, PEER_QUEUE_KEY_COMPARATOR); @@ -323,17 +349,44 @@ protected int queueSize() { count += queueSize(peerMap); count += queueSize(metaMap); count += queueSize(globalMap); + for (TableProcedureWaitingQueue waitingQ : tableProcsWaitingEnqueue.values()) { + count += waitingQ.waitingSize(); + } return count; } @Override public void completionCleanup(final Procedure proc) { - if (proc instanceof TableProcedureInterface) { - TableProcedureInterface iProcTable = (TableProcedureInterface) proc; + if (isTableProcedure(proc)) { + TableProcedureInterface tableProc = (TableProcedureInterface) proc; + if (shouldWaitBeforeEnqueuing(tableProc)) { + schedLock(); + try { + TableProcedureWaitingQueue waitingQueue = + tableProcsWaitingEnqueue.get(tableProc.getTableName()); + if (waitingQueue != null) { + waitingQueue.procedureCompleted(proc).ifPresentOrElse(next -> { + // enqueue it + LOG.debug("{} completed, enqueue a new procedure {}", proc, next); + doAdd(tableRunQueue, getTableQueue(tableProc.getTableName()), next, false); + }, () -> { + if (waitingQueue.isEmpty()) { + // there is no waiting procedures in it, remove + tableProcsWaitingEnqueue.remove(tableProc.getTableName()); + } + }); + } else { + // this should not happen normally, warn it + LOG.warn("no waiting queue while completing {}, which should not happen", proc); + } + } finally { + schedUnlock(); + } + } boolean tableDeleted; if (proc.hasException()) { Exception procEx = proc.getException().unwrapRemoteException(); - if (iProcTable.getTableOperationType() == TableOperationType.CREATE) { + if (tableProc.getTableOperationType() == TableOperationType.CREATE) { // create failed because the table already exist tableDeleted = !(procEx instanceof TableExistsException); } else { @@ -342,11 +395,10 @@ public void completionCleanup(final Procedure proc) { } } else { // the table was deleted - tableDeleted = (iProcTable.getTableOperationType() == TableOperationType.DELETE); + tableDeleted = (tableProc.getTableOperationType() == TableOperationType.DELETE); } if (tableDeleted) { - markTableAsDeleted(iProcTable.getTableName(), proc); - return; + markTableAsDeleted(tableProc.getTableName(), proc); } } else if (proc instanceof PeerProcedureInterface) { tryCleanupPeerQueue(getPeerId(proc), proc); @@ -722,7 +774,9 @@ boolean markTableAsDeleted(final TableName table, final Procedure procedure) try { final TableQueue queue = getTableQueue(table); final LockAndQueue tableLock = locking.getTableLock(table); - if (queue == null) return true; + if (queue == null) { + return true; + } if (queue.isEmpty() && tableLock.tryExclusiveLock(procedure)) { // remove the table from the run-queue and the map @@ -1149,6 +1203,7 @@ public String toString() { serverBucketToString(builder, "serverBuckets[" + i + "]", serverBuckets[i]); } builder.append("tableMap", tableMap); + builder.append("tableWaitingMap", tableProcsWaitingEnqueue); builder.append("peerMap", peerMap); builder.append("metaMap", metaMap); builder.append("globalMap", globalMap); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureWaitingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureWaitingQueue.java new file mode 100644 index 000000000000..fbd5037c8804 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureWaitingQueue.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.util.ArrayDeque; +import java.util.Optional; +import java.util.Queue; +import java.util.function.Function; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * To prevent multiple Create/Modify/Disable/Enable table procedures run at the same time, we will + * keep table procedure in this queue first before actually enqueuing it to + * MasterProcedureScheduler's tableQueue. See HBASE-28683 for more details + */ +@InterfaceAudience.Private +class TableProcedureWaitingQueue { + + private final Function> procedureRetriever; + + // whether there is already a table procedure enqueued in ProcedureScheduler. + private Procedure enqueuedProc; + + private final Queue> queue = new ArrayDeque<>(); + + TableProcedureWaitingQueue(Function> procedureRetriever) { + this.procedureRetriever = procedureRetriever; + } + + private boolean isSubProcedure(Procedure proc) { + while (proc.hasParent()) { + if (proc.getParentProcId() == enqueuedProc.getProcId()) { + return true; + } + proc = Preconditions.checkNotNull(procedureRetriever.apply(proc.getParentProcId()), + "can not find parent procedure pid=%s", proc.getParentProcId()); + } + return false; + } + + /** + * Return whether we can enqueue this procedure to ProcedureScheduler. + *
* <p/>
    + * If returns {@code true}, you should enqueue this procedure, otherwise you just need to do + * nothing, as we will queue it in the waitingQueue, and you will finally get it again by calling + * {@link #procedureCompleted(Procedure)} method in the future. + */ + boolean procedureSubmitted(Procedure proc) { + if (enqueuedProc == null) { + // no procedure enqueued yet, record it and return + enqueuedProc = proc; + return true; + } + if (proc == enqueuedProc) { + // the same procedure is enqueued again, this usually because the procedure comes back from + // WAITING state, such as all child procedures are finished + return true; + } + // check whether this is a sub procedure of the enqueued procedure + if (isSubProcedure(proc)) { + return true; + } + queue.add(proc); + return false; + } + + /** + * Return the next procedure which can be enqueued to ProcedureScheduler. + */ + Optional> procedureCompleted(Procedure proc) { + Preconditions.checkState(enqueuedProc != null, "enqueued procedure should not be null"); + if (enqueuedProc == proc) { + if (!queue.isEmpty()) { + enqueuedProc = queue.poll(); + return Optional.of(enqueuedProc); + } else { + enqueuedProc = null; + return Optional.empty(); + } + } else { + Preconditions.checkState(isSubProcedure(proc), + "procedure %s is not a sub procedure of enqueued procedure %s", proc, enqueuedProc); + return Optional.empty(); + } + } + + boolean isEmpty() { + return enqueuedProc == null; + } + + int waitingSize() { + return queue.size(); + } + + @Override + public String toString() { + return "TableProcedureWaitingQueue [enqueuedProc=" + enqueuedProc + ", queue=" + queue + "]"; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index 36c9df6e794e..078dc8313863 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -47,7 +47,7 @@ public boolean requireExclusiveLock(Procedure proc) { /** * @param proc must not be null */ - private static boolean requireTableExclusiveLock(TableProcedureInterface proc) { + static boolean requireTableExclusiveLock(TableProcedureInterface proc) { switch (proc.getTableOperationType()) { case CREATE: case DELETE: diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java index 8ec56b9331f1..47bcb01a7ddc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java @@ -124,6 +124,7 @@ public LockState acquireLock(Void env) { @Override public void releaseLock(Void env) { procedureScheduler.wakeTableExclusiveLock(this, getTableName()); + procedureScheduler.completionCleanup(this); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java index 0cf34126a945..7dc09837c478 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java @@ -27,7 +27,9 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -65,12 +67,15 @@ public class TestMasterProcedureScheduler { private MasterProcedureScheduler queue; + private Map> procedures; + @Rule public TestName name = new TestName(); @Before public void setUp() throws IOException { - queue = new MasterProcedureScheduler(pid -> null); + procedures = new HashMap<>(); + queue = new MasterProcedureScheduler(procedures::get); queue.start(); } @@ -95,7 +100,7 @@ public void testSimpleTableOpsQueues() throws Exception { // insert items for (int j = 1; j <= NUM_ITEMS; ++j) { queue.addBack(new TestTableProcedure(i * 1000 + j, tableName, - TableProcedureInterface.TableOperationType.EDIT)); + TableProcedureInterface.TableOperationType.REGION_EDIT)); assertEquals(++count, queue.size()); } } @@ -222,7 +227,7 @@ public void testVerifyRwLocks() throws Exception { assertEquals(null, queue.poll(0)); // Release the write lock and acquire the read lock - queue.wakeTableExclusiveLock(proc, tableName); + releaseTableExclusiveLockAndComplete(proc, tableName); // Fetch the 2nd item and take the read lock Procedure rdProc = queue.poll(); @@ -248,7 +253,7 @@ public void testVerifyRwLocks() throws Exception { assertEquals(null, queue.poll(0)); // Release the write lock and acquire the read lock - queue.wakeTableExclusiveLock(wrProc, tableName); + releaseTableExclusiveLockAndComplete(wrProc, tableName); // Fetch the 4th item and take the read lock rdProc = queue.poll(); @@ -641,6 +646,13 @@ public void testRootXLockAndChildrenSharedLock() throws Exception { childProcs); } + private void releaseTableExclusiveLockAndComplete(Procedure proc, TableName tableName) { + // release xlock + queue.wakeTableExclusiveLock(proc, tableName); + // mark the procedure as complete + queue.completionCleanup(proc); + } + private void testInheritedXLockAndChildrenSharedLock(final TableName tableName, final TestTableProcedure rootProc, final TestRegionProcedure[] childProcs) throws Exception { queue.addBack(rootProc); @@ -671,13 +683,13 @@ private void testInheritedXLockAndChildrenSharedLock(final TableName tableName, assertEquals(null, queue.poll(0)); // release xlock - queue.wakeTableExclusiveLock(parentProc, tableName); + releaseTableExclusiveLockAndComplete(parentProc, tableName); // fetch the other xlock proc Procedure proc = queue.poll(); assertEquals(100, proc.getProcId()); assertEquals(false, queue.waitTableExclusiveLock(proc, tableName)); - queue.wakeTableExclusiveLock(proc, tableName); + releaseTableExclusiveLockAndComplete(proc, tableName); } @Test @@ -694,29 +706,35 @@ public void testRootXLockAndChildrenXLock() throws Exception { // simulate 3 procedures: 1 (root), (2) child of root, (3) child of proc-2 testInheritedXLockAndChildrenXLock(tableName, new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.EDIT), + new TestTableProcedure(1, 1, 2, tableName, TableProcedureInterface.TableOperationType.EDIT), new TestTableProcedure(1, 2, 3, tableName, TableProcedureInterface.TableOperationType.EDIT)); } private void testInheritedXLockAndChildrenXLock(final TableName tableName, - final TestTableProcedure rootProc, final TestTableProcedure childProc) 
throws Exception { + final TestTableProcedure rootProc, final TestTableProcedure... childProcs) throws Exception { + procedures.put(rootProc.getProcId(), rootProc); + for (TestTableProcedure childProc : childProcs) { + procedures.put(childProc.getProcId(), childProc); + } queue.addBack(rootProc); // fetch and acquire first xlock proc Procedure parentProc = queue.poll(); - assertEquals(rootProc, parentProc); + assertSame(rootProc, parentProc); assertEquals(false, queue.waitTableExclusiveLock(parentProc, tableName)); + TestTableProcedure childProc = childProcs[childProcs.length - 1]; // add child procedure queue.addFront(childProc); // fetch the other xlock proc Procedure proc = queue.poll(); - assertEquals(childProc, proc); + assertSame(childProc, proc); assertEquals(false, queue.waitTableExclusiveLock(proc, tableName)); - queue.wakeTableExclusiveLock(proc, tableName); + releaseTableExclusiveLockAndComplete(proc, tableName); // release xlock - queue.wakeTableExclusiveLock(parentProc, tableName); + releaseTableExclusiveLockAndComplete(proc, tableName); } @Test @@ -744,7 +762,7 @@ public void testYieldWithXLockHeld() throws Exception { assertEquals(1, proc.getProcId()); // release the xlock - queue.wakeTableExclusiveLock(proc, tableName); + releaseTableExclusiveLockAndComplete(proc, tableName); proc = queue.poll(); assertEquals(2, proc.getProcId()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java index dfb1a968b101..5729753b84b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java @@ -264,7 +264,9 @@ public Procedure acquire() { boolean waiting = true; while (waiting && queue.size() > 0) { proc = queue.poll(100000000L); - if (proc == null) continue; + if (proc == null) { + continue; + } switch (getTableOperationType(proc)) { case CREATE: case DELETE: @@ -294,6 +296,7 @@ public void release(Procedure proc) { default: throw new UnsupportedOperationException(); } + queue.completionCleanup(proc); } public TableName getTableName(Procedure proc) { From 83fb64ffabe33228c58791414ff5a8d2ffb64de0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 17 Jul 2024 09:29:17 +0800 Subject: [PATCH 463/514] HBASE-28683 Addendum forward port the branch-2 patch to remove the usage of JDK9+ features --- .../hbase/master/procedure/MasterProcedureScheduler.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index 9f81019606e7..6c0e99f0df4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -365,16 +366,18 @@ public void completionCleanup(final Procedure proc) { TableProcedureWaitingQueue waitingQueue = 
tableProcsWaitingEnqueue.get(tableProc.getTableName()); if (waitingQueue != null) { - waitingQueue.procedureCompleted(proc).ifPresentOrElse(next -> { + Optional> nextProc = waitingQueue.procedureCompleted(proc); + if (nextProc.isPresent()) { // enqueue it + Procedure next = nextProc.get(); LOG.debug("{} completed, enqueue a new procedure {}", proc, next); doAdd(tableRunQueue, getTableQueue(tableProc.getTableName()), next, false); - }, () -> { + } else { if (waitingQueue.isEmpty()) { // there is no waiting procedures in it, remove tableProcsWaitingEnqueue.remove(tableProc.getTableName()); } - }); + } } else { // this should not happen normally, warn it LOG.warn("no waiting queue while completing {}, which should not happen", proc); From d7a9fc81217aca648f3998b924ef7ee772317f39 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 17 Jul 2024 10:43:22 +0800 Subject: [PATCH 464/514] HBASE-28708 Remove the specific logic for jdk11 in hbase-assembly and change our script to only support jdk17 (#6069) Signed-off-by: Xin Sun --- bin/hbase | 97 ++++--------------- bin/hbase-config.sh | 14 ++- .../src/main/assembly/hadoop-three-compat.xml | 67 +------------ 3 files changed, 31 insertions(+), 147 deletions(-) diff --git a/bin/hbase b/bin/hbase index 81379eaa587d..ff0c57c055c7 100755 --- a/bin/hbase +++ b/bin/hbase @@ -158,15 +158,7 @@ fi # establish a default value for HBASE_OPTS if it's not already set. For now, # all we set is the garbage collector. if [ -z "${HBASE_OPTS}" ] ; then - major_version_number="$(parse_java_major_version "$(read_java_version)")" - case "$major_version_number" in - 8|9|10) - HBASE_OPTS="-XX:+UseConcMarkSweepGC" - ;; - 11|*) - HBASE_OPTS="-XX:+UseG1GC" - ;; - esac + HBASE_OPTS="-XX:+UseG1GC" export HBASE_OPTS fi @@ -487,17 +479,22 @@ add_maven_deps_to_classpath() { CLASSPATH=${CLASSPATH}:$(cat "${f}") } -add_jdk11_deps_to_classpath() { - for f in ${HBASE_HOME}/lib/jdk11/*; do - if [ -f "${f}" ]; then - CLASSPATH="${CLASSPATH}:${f}" - fi - done -} - -add_jdk11_jvm_flags() { - # Keep in sync with hbase-surefire.jdk11.flags in the root pom.xml - HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens java.base/java.lang.reflect=ALL-UNNAMED --add-opens java.base/java.util=ALL-UNNAMED --add-opens java.base/java.util.concurrent=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports java.base/sun.net.dns=ALL-UNNAMED --add-exports java.base/sun.net.util=ALL-UNNAMED" +add_jdk17_jvm_flags() { + # Keep in sync with hbase-surefire.jdk17.flags in the root pom.xml + HBASE_OPTS="$HBASE_OPTS -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true" + HBASE_OPTS="$HBASE_OPTS --add-modules jdk.unsupported" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/java.io=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/java.nio=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/sun.nio.ch=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/java.lang=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/jdk.internal.ref=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/java.lang.reflect=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens 
java.base/java.util=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-opens java.base/java.util.concurrent=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-exports java.base/jdk.internal.misc=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-exports java.base/sun.net.dns=ALL-UNNAMED" + HBASE_OPTS="$HBASE_OPTS --add-exports java.base/sun.net.util=ALL-UNNAMED" } add_opentelemetry_agent() { @@ -566,12 +563,6 @@ if [ "$COMMAND" = "shell" ] ; then fi HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS" elif [ "$COMMAND" = 'jshell' ] ; then - java_version="$(read_java_version)" - major_version_number="$(parse_java_major_version "${java_version}")" - if [ "${major_version_number}" -lt 9 ] ; then - echo "JShell is available only with JDK9 and lated. Detected JDK version is ${java_version}". - exit 1 - fi CLASS='jdk.internal.jshell.tool.JShellToolProvider' # set default values for HBASE_JSHELL_ARGS read -r -a JSHELL_ARGS <<< "${HBASE_JSHELL_ARGS:-"--startup DEFAULT --startup PRINTING --startup ${HBASE_HOME}/bin/hbase_startup.jsh"}" @@ -784,59 +775,9 @@ if [[ "$CLASS" =~ .*IntegrationTest.* ]] ; then fi fi -# Add lib/jdk11 jars to the classpath - +add_jdk17_jvm_flags if [ "${DEBUG}" = "true" ]; then - echo "Deciding on addition of lib/jdk11 jars to the classpath and setting JVM module flags" -fi - -addJDK11Jars=false - -if [ "${HBASE_JDK11}" != "" ]; then - # Use the passed Environment Variable HBASE_JDK11 - if [ "${HBASE_JDK11}" = "include" ]; then - addJDK11Jars=true - if [ "${DEBUG}" = "true" ]; then - echo "HBASE_JDK11 set as 'include' hence adding JDK11 jars to classpath." - fi - elif [ "${HBASE_JDK11}" = "exclude" ]; then - if [ "${DEBUG}" = "true" ]; then - echo "HBASE_JDK11 set as 'exclude' hence skipping JDK11 jars to classpath." - fi - else - echo "[HBASE_JDK11] contains unsupported value(s) - ${HBASE_JDK11}. Ignoring passed value." - echo "[HBASE_JDK11] supported values: [include, exclude]." - fi -else - # Use JDK detection - version="$(read_java_version)" - major_version_number="$(parse_java_major_version "$version")" - - if [ "${DEBUG}" = "true" ]; then - echo "HBASE_JDK11 not set hence using JDK detection." - echo "Extracted JDK version - ${version}, major_version_number - ${major_version_number}" - fi - - if [[ "$major_version_number" -ge "11" ]]; then - if [ "${DEBUG}" = "true" ]; then - echo "Version ${version} is greater-than/equal to 11 hence adding JDK11 jars to classpath." - fi - addJDK11Jars=true - elif [ "${DEBUG}" = "true" ]; then - echo "Version ${version} is lesser than 11 hence skipping JDK11 jars from classpath." - fi -fi - -if [ "${addJDK11Jars}" = "true" ]; then - add_jdk11_deps_to_classpath - add_jdk11_jvm_flags - if [ "${DEBUG}" = "true" ]; then - echo "Added JDK11 jars to classpath." - echo "Added JDK11 JVM flags too." - fi -elif [ "${DEBUG}" = "true" ]; then - echo "JDK11 jars skipped from classpath." - echo "Skipped adding JDK11 JVM flags." + echo "Added JDK17 JVM flags." fi if [[ "${HBASE_OTEL_TRACING_ENABLED:-false}" = "true" ]] ; then diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 0e8b3feed213..d9afa19a4aaa 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -164,17 +164,17 @@ export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4} # Now having JAVA_HOME defined is required if [ -z "$JAVA_HOME" ]; then - cat 1>&2 <&2 < http://www.oracle.com/technetwork/java/javase/downloads | | | -| HBase requires Java 1.8 or later. | +| HBase requires Java 17 or later. 
| +======================================================================+ EOF - exit 1 + exit 1 fi function read_java_version() { @@ -203,3 +203,11 @@ function parse_java_major_version() { ;; esac } + +# test whether we are on jdk17 or above +java_version="$(read_java_version)" +major_version_number="$(parse_java_major_version "$java_version")" +if [ "${major_version_number}" -lt 17 ] ; then + echo "HBase can only be run on JDK17 and later. Detected JDK version is ${java_version}". + exit 1 +fi diff --git a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml index 27962b6e473c..244de766ce72 100644 --- a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml +++ b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml @@ -65,36 +65,7 @@ - - com.sun.activation:javax.activation - - com.sun.xml.ws:* - jakarta.annotation:jakarta.annotation-api - org.glassfish.jaxb:* - com.sun.istack:istack-commons-runtime - org.glassfish.gmbal:gmbal - org.glassfish.external:management-api - org.glassfish.pfl:* - org.jvnet.staxex:stax-ex - com.sun.xml.stream.buffer:streambuffer - org.jvnet.mimepull:mimepull - com.sun.xml.fastinfoset:FastInfoset - org.glassfish.ha:ha-api - com.sun.xml.messaging.saaj:saaj-impl - jakarta.activation:jakarta.activation-api - com.sun.xml.bind:jaxb-xjc - com.sun.xml.bind:jaxb-jxc - jakarta.mail:jakarta.mail-api - jakarta.persistence:jakarta.persistence-api - org.eclipse.persistence:* - jakarta.xml.ws:jakarta.xml.ws-api - jakarta.xml.bind:jakarta.xml.bind-api - jakarta.xml.soap:jakarta.xml.soap-api - jakarta.jws:jakarta.jws-api - + org.jruby:jruby-complete com.sun.jersey:* com.sun.jersey.contribs:* @@ -221,42 +192,6 @@ jline:jline - - lib/jdk11 - true - - com.sun.activation:javax.activation - - com.sun.xml.ws:* - jakarta.annotation:jakarta.annotation-api - org.glassfish.jaxb:* - com.sun.istack:istack-commons-runtime - org.glassfish.gmbal:gmbal - org.glassfish.external:management-api - org.glassfish.pfl:* - org.jvnet.staxex:stax-ex - com.sun.xml.stream.buffer:streambuffer - org.jvnet.mimepull:mimepull - com.sun.xml.fastinfoset:FastInfoset - org.glassfish.ha:ha-api - com.sun.xml.messaging.saaj:saaj-impl - com.fasterxml.woodstox:woodstox-core - org.codehaus.woodstox:stax2-api - jakarta.activation:jakarta.activation-api - com.sun.xml.bind:jaxb-xjc - com.sun.xml.bind:jaxb-jxc - jakarta.mail:jakarta.mail-api - jakarta.persistence:jakarta.persistence-api - org.eclipse.persistence:* - jakarta.xml.ws:jakarta.xml.ws-api - jakarta.xml.bind:jakarta.xml.bind-api - jakarta.xml.soap:jakarta.xml.soap-api - jakarta.jws:jakarta.jws-api - - lib/trace From 4b3b5e6c2519980afd07034384e4382e0f9eb34d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 17 Jul 2024 22:28:38 +0800 Subject: [PATCH 465/514] HBASE-28731 Remove the IA.Private annotation on WALEdit's add methods as they have already been used by CP users (#6083) Signed-off-by: Istvan Toth Signed-off-by: Pankaj Kumar --- .../main/java/org/apache/hadoop/hbase/wal/WALEdit.java | 9 --------- 1 file changed, 9 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index c387dfe13ac0..01dbe06682bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -46,10 +46,6 @@ * that came in as a single transaction. 
All the edits for a given transaction are written out as a single record, in PB format, followed (optionally) by Cells written via the WALCellEncoder. * <p>
- * This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are classified as - * private methods, not for use by CPs. - * </p>
- * <p>
    * A particular WALEdit 'type' is the 'meta' type used to mark key operational events in the WAL * such as compaction, flush, or region open. These meta types do not traverse hbase memstores. They * are edits made by the hbase system rather than edit data submitted by clients. They only show in @@ -73,7 +69,6 @@ * file. See the hand-off in FSWALEntry Constructor. * @see WALKey */ -// TODO: Do not expose this class to Coprocessors. It has set methods. A CP might meddle. @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, HBaseInterfaceAudience.COPROC }) public class WALEdit implements HeapSize { @@ -253,20 +248,17 @@ public boolean isReplay() { return this.replay; } - @InterfaceAudience.Private public WALEdit add(Cell cell, byte[] family) { getOrCreateFamilies().add(family); return addCell(cell); } - @InterfaceAudience.Private public WALEdit add(Cell cell) { // We clone Family each time we add a Cell. Expensive but safe. For CPU savings, use // add(Map) or add(Cell, family). return add(cell, CellUtil.cloneFamily(cell)); } - @InterfaceAudience.Private public WALEdit add(List cells) { if (cells == null || cells.isEmpty()) { return this; @@ -294,7 +286,6 @@ public ArrayList getCells() { * that nothing else depends on the contents being immutable. * @param cells the list of cells that this WALEdit now contains. */ - @InterfaceAudience.Private // Used by replay. public void setCells(ArrayList cells) { this.cells = cells; From c3e40d692174b72bfc587eb1ce410a8396b8fc39 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 17 Jul 2024 12:55:34 -0700 Subject: [PATCH 466/514] HBASE-28739 Update downloads.xml for 2.5.9 Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 1a3e08ebe184..02e1b11c2d00 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -95,26 +95,26 @@ under the License. - 2.5.8 + 2.5.9 - 2024/03/12 + 2024/07/15 - 2.5.8 vs 2.5.7 + 2.5.9 vs 2.5.8 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc)
    - hadoop3-bin (sha512 asc)
    - hadoop3-client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc)
    + hadoop3-bin (sha512 asc)
    + hadoop3-client-bin (sha512 asc) stable release From 8f5516d219d300a61613e9a53566f240dd73ad1d Mon Sep 17 00:00:00 2001 From: Divneet18 Date: Wed, 17 Jul 2024 23:33:15 -0700 Subject: [PATCH 467/514] HBASE-28428 : Zookeeper ConnectionRegistry APIs should have timeout (#5837) Signed-off-by: Duo Zhang Signed-off-by: Pankaj Kumar Signed-off-by: Viraj Jasani --- .../hbase/client/ZKConnectionRegistry.java | 17 +++++--- .../hbase/zookeeper/ReadOnlyZKClient.java | 39 ++++++++++++++++++- .../hbase/zookeeper/TestReadOnlyZKClient.java | 26 ++++++++++++- 3 files changed, 75 insertions(+), 7 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index a46f4d74e382..8c4bdf4d51c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -69,11 +69,18 @@ class ZKConnectionRegistry implements ConnectionRegistry { private final ReadOnlyZKClient zk; private final ZNodePaths znodePaths; - + private final Configuration conf; + private final int zkRegistryAsyncTimeout; + public static final String ZK_REGISTRY_ASYNC_GET_TIMEOUT = "zookeeper.registry.async.get.timeout"; + public static final int DEFAULT_ZK_REGISTRY_ASYNC_GET_TIMEOUT = 60000; // 1 min // User not used, but for rpc based registry we need it + ZKConnectionRegistry(Configuration conf, User ignored) { this.znodePaths = new ZNodePaths(conf); - this.zk = new ReadOnlyZKClient(conf); + this.zk = new ReadOnlyZKClient(conf, AsyncConnectionImpl.RETRY_TIMER); + this.conf = conf; + this.zkRegistryAsyncTimeout = + conf.getInt(ZK_REGISTRY_ASYNC_GET_TIMEOUT, DEFAULT_ZK_REGISTRY_ASYNC_GET_TIMEOUT); if (NEEDS_LOG_WARN) { synchronized (WARN_LOCK) { if (NEEDS_LOG_WARN) { @@ -91,7 +98,7 @@ private interface Converter { private CompletableFuture getAndConvert(String path, Converter converter) { CompletableFuture future = new CompletableFuture<>(); - addListener(zk.get(path), (data, error) -> { + addListener(zk.get(path, this.zkRegistryAsyncTimeout), (data, error) -> { if (error != null) { future.completeExceptionally(error); return; @@ -218,8 +225,8 @@ public CompletableFuture getMetaRegionLocations() { return tracedFuture(() -> { CompletableFuture future = new CompletableFuture<>(); addListener( - zk.list(znodePaths.baseZNode).thenApply(children -> children.stream() - .filter(c -> this.znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())), + zk.list(znodePaths.baseZNode, this.zkRegistryAsyncTimeout).thenApply(children -> children + .stream().filter(c -> this.znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())), (metaReplicaZNodes, error) -> { if (error != null) { future.completeExceptionally(error); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 64b151dc19a5..6c26f089d742 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -43,6 +43,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; +import org.apache.hbase.thirdparty.io.netty.util.TimerTask; + /** * A very simple read only zookeeper implementation without watcher support. 
*/ @@ -76,6 +79,8 @@ public final class ReadOnlyZKClient implements Closeable { private final int keepAliveTimeMs; + private HashedWheelTimer retryTimer; + private final ZKClientConfig zkClientConfig; private static abstract class Task implements Delayed { @@ -126,7 +131,7 @@ private String getId() { return String.format("0x%08x", System.identityHashCode(this)); } - public ReadOnlyZKClient(Configuration conf) { + public ReadOnlyZKClient(Configuration conf, HashedWheelTimer retryTimer) { // We might use a different ZK for client access String clientZkQuorumServers = ZKConfig.getClientZKQuorumServersString(conf); if (clientZkQuorumServers != null) { @@ -140,6 +145,7 @@ public ReadOnlyZKClient(Configuration conf) { conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); this.zkClientConfig = ZKConfig.getZKClientConfig(conf); + this.retryTimer = retryTimer; LOG.debug( "Connect {} to {} with session timeout={}ms, retries={}, " + "retry interval={}ms, keepAlive={}ms, zk client config={}", @@ -258,6 +264,23 @@ public void closed(IOException e) { } } + private static TimerTask getTimerTask(final long timeoutMs, final CompletableFuture future, + final String api) { + return timeout -> { + if (!future.isDone()) { + future.completeExceptionally(new DoNotRetryIOException( + "Zookeeper " + api + " could not be completed in " + timeoutMs + " ms")); + } + }; + } + + public CompletableFuture get(final String path, final long timeoutMs) { + CompletableFuture future = get(path); + TimerTask timerTask = getTimerTask(timeoutMs, future, "GET"); + retryTimer.newTimeout(timerTask, timeoutMs + 1, TimeUnit.MILLISECONDS); + return future; + } + public CompletableFuture get(String path) { if (closed.get()) { return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed")); @@ -274,6 +297,13 @@ protected void doExec(ZooKeeper zk) { return future; } + public CompletableFuture exists(String path, long timeoutMs) { + CompletableFuture future = exists(path); + TimerTask timerTask = getTimerTask(timeoutMs, future, "EXISTS"); + retryTimer.newTimeout(timerTask, timeoutMs + 1, TimeUnit.MILLISECONDS); + return future; + } + public CompletableFuture exists(String path) { if (closed.get()) { return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed")); @@ -289,6 +319,13 @@ protected void doExec(ZooKeeper zk) { return future; } + public CompletableFuture> list(String path, long timeoutMs) { + CompletableFuture> future = list(path); + TimerTask timerTask = getTimerTask(timeoutMs, future, "LIST"); + retryTimer.newTimeout(timerTask, timeoutMs + 1, TimeUnit.MILLISECONDS); + return future; + } + public CompletableFuture> list(String path) { if (closed.get()) { return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed")); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java index 2f08f6276db4..23a8c339cd71 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java @@ -43,6 +43,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.Exchanger; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtil; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ZKTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -63,6 +65,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; + @Category({ ZKTests.class, MediumTests.class }) public class TestReadOnlyZKClient { @@ -79,6 +84,10 @@ public class TestReadOnlyZKClient { private static int CHILDREN = 5; private static ReadOnlyZKClient RO_ZK; + private static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer( + new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + 10, TimeUnit.MILLISECONDS); @BeforeClass public static void setUp() throws Exception { @@ -98,13 +107,14 @@ public static void setUp() throws Exception { conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY, 3); conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY_INTERVAL_MILLIS, 100); conf.setInt(ReadOnlyZKClient.KEEPALIVE_MILLIS, 3000); - RO_ZK = new ReadOnlyZKClient(conf); + RO_ZK = new ReadOnlyZKClient(conf, RETRY_TIMER); // only connect when necessary assertNull(RO_ZK.zookeeper); } @AfterClass public static void tearDown() throws IOException { + RETRY_TIMER.stop(); RO_ZK.close(); UTIL.shutdownMiniZKCluster(); UTIL.cleanupTestDir(); @@ -204,4 +214,18 @@ public void testNotCloseZkWhenPending() throws Exception { waitForIdleConnectionClosed(); verify(mockedZK, times(1)).close(); } + + @Test + public void testReadWithTimeout() throws Exception { + assertArrayEquals(DATA, RO_ZK.get(PATH, 60000).get()); + assertEquals(CHILDREN, RO_ZK.exists(PATH, 60000).get().getNumChildren()); + List children = RO_ZK.list(PATH, 60000).get(); + assertEquals(CHILDREN, children.size()); + Collections.sort(children); + for (int i = 0; i < CHILDREN; i++) { + assertEquals("c" + i, children.get(i)); + } + assertNotNull(RO_ZK.zookeeper); + waitForIdleConnectionClosed(); + } } From 724d98d1b89745850f1b8f7758538a4fa656f01c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 18 Jul 2024 14:48:49 +0800 Subject: [PATCH 468/514] HBASE-28736 Modify our ref guide about the slack channel change (#6086) Signed-off-by: Yi Mei --- src/main/asciidoc/_chapters/developer.adoc | 7 +++++-- src/main/asciidoc/_chapters/troubleshooting.adoc | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index ec3c4659bf09..11a3c89b0beb 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -51,8 +51,11 @@ Posing questions - and helping to answer other people's questions - is encourage [[slack]] === Slack -The Apache HBase project has its own link: http://apache-hbase.slack.com[Slack Channel] for real-time questions -and discussion. Mail dev@hbase.apache.org to request an invite. 
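
The timeout plumbing in ReadOnlyZKClient above boils down to a small reusable pattern: register a task on a shared HashedWheelTimer that completes the still-pending future exceptionally once the deadline passes, and let the normal completion path win if it finishes first. A stripped-down sketch of that pattern on its own (class and method names here are illustrative, not from the patch):

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;

    public final class FutureTimeouts {
      private FutureTimeouts() {
      }

      // Completes the future exceptionally if it is still pending after timeoutMs.
      // If the future already completed, the timer task is a cheap no-op.
      public static <T> CompletableFuture<T> guard(CompletableFuture<T> future,
          HashedWheelTimer timer, long timeoutMs, String what) {
        timer.newTimeout(timeout -> {
          if (!future.isDone()) {
            future.completeExceptionally(
              new IOException(what + " could not be completed in " + timeoutMs + " ms"));
          }
        }, timeoutMs + 1, TimeUnit.MILLISECONDS);
        return future;
      }
    }

Sharing one wheel timer across all calls, as the patch does by reusing AsyncConnectionImpl.RETRY_TIMER, keeps the per-call overhead to a single timer registration rather than a dedicated scheduler thread.
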
+ +The Apache HBase project uses the #hbase channel on the official +https://the-asf.slack.com/[ASF Slack Workspace] for real-time questions and discussion. +All committers of any Apache projects can join the channel directly, for others, please mail +dev@hbase.apache.org to request an invite. [[irc]] === Internet Relay Chat (IRC) diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc b/src/main/asciidoc/_chapters/troubleshooting.adoc index 411b9b8ef6cd..dea70fb200b8 100644 --- a/src/main/asciidoc/_chapters/troubleshooting.adoc +++ b/src/main/asciidoc/_chapters/troubleshooting.adoc @@ -235,7 +235,7 @@ A quality question that includes all context and exhibits evidence the author ha [[trouble.resources.slack]] === Slack -See http://apache-hbase.slack.com Channel on Slack +#hbase on https://the-asf.slack.com/ [[trouble.resources.irc]] === IRC From 38e5983912ff8c408bf1fac98a837c70417086ea Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 18 Jul 2024 14:49:11 +0800 Subject: [PATCH 469/514] HBASE-28737 Add the slack channel related information in README.md (#6087) Signed-off-by: Yi Mei --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e944bd8c570d..56d3d70b6675 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,8 @@ The latest HBase can be downloaded from the [download page](https://hbase.apache We use mailing lists to send notice and discuss. The mailing lists and archives are listed [here](http://hbase.apache.org/mail-lists.html) +We use the #hbase channel on the official [ASF Slack Workspace](https://the-asf.slack.com/) for real time questions and discussions. Please mail dev@hbase.apache.org to request an invite. + # How to Contribute The source code can be found at https://hbase.apache.org/source-repository.html From b4cbd5c84b2402c5779e79d7076cea94be014d21 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Thu, 18 Jul 2024 07:45:42 -0400 Subject: [PATCH 470/514] HBASE-28686 MapReduceBackupCopyJob should support custom DistCp options (#6017) Co-authored-by: Ray Mattingly Signed-off-by: Duo Zhang Signed-off-by: Nick Dimiduk --- .../mapreduce/MapReduceBackupCopyJob.java | 37 ++++++++++++- .../mapreduce/TestMapReduceBackupCopyJob.java | 55 +++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/mapreduce/TestMapReduceBackupCopyJob.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java index 51a276df4c5a..747bd3e217d9 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java @@ -22,6 +22,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.math.BigDecimal; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -45,6 +46,7 @@ import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; +import org.apache.hadoop.tools.DistCpOptionSwitch; import org.apache.hadoop.tools.DistCpOptions; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -58,6 +60,10 @@ @InterfaceAudience.Private public class MapReduceBackupCopyJob implements BackupCopyJob { public 
static final String NUMBER_OF_LEVELS_TO_PRESERVE_KEY = "num.levels.preserve"; + + // This prefix specifies the DistCp options to be used during backup copy + public static final String BACKUP_COPY_OPTION_PREFIX = "hbase.backup.copy."; + private static final Logger LOG = LoggerFactory.getLogger(MapReduceBackupCopyJob.class); private Configuration conf; @@ -394,7 +400,15 @@ public int copy(BackupInfo context, BackupManager backupManager, Configuration c if (!destfs.exists(dest)) { destfs.mkdirs(dest); } - res = distcp.run(newOptions); + + List distCpOptionsFromConf = parseDistCpOptions(conf); + String[] finalOptions = new String[newOptions.length + distCpOptionsFromConf.size()]; + for (int i = 0; i < distCpOptionsFromConf.size(); i++) { + finalOptions[i] = distCpOptionsFromConf.get(i); + } + System.arraycopy(newOptions, 0, finalOptions, distCpOptionsFromConf.size(), + newOptions.length); + res = distcp.run(finalOptions); } return res; @@ -425,4 +439,25 @@ public void cancel(String jobId) throws IOException { } } + protected static List parseDistCpOptions(Configuration conf) { + List extraArgsFromConf = new ArrayList<>(); + + for (DistCpOptionSwitch optionSwitch : DistCpOptionSwitch.values()) { + String configLabel = BACKUP_COPY_OPTION_PREFIX + optionSwitch.getConfigLabel(); + if (conf.get(configLabel) != null) { + if (optionSwitch.getOption().hasArg()) { + extraArgsFromConf.add("-" + optionSwitch.getOption().getOpt()); + extraArgsFromConf.add(conf.get(configLabel)); + } else { + boolean value = conf.getBoolean(configLabel, false); + if (value) { + extraArgsFromConf.add("-" + optionSwitch.getOption().getOpt()); + } + } + } + } + + return extraArgsFromConf; + } + } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/mapreduce/TestMapReduceBackupCopyJob.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/mapreduce/TestMapReduceBackupCopyJob.java new file mode 100644 index 000000000000..6e35815c84f8 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/mapreduce/TestMapReduceBackupCopyJob.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
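
To make the new pass-through concrete: parseDistCpOptions above looks up every DistCp switch under a key of the form hbase.backup.copy.<distcp config label>, so tuning the copy phase becomes purely a configuration matter. A rough sketch, reusing the same Hadoop DistCpConstants labels the new test exercises (the values 16 and true are arbitrary examples):

    import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_DIRECT_WRITE;
    import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_MAX_MAPS;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;

    public class BackupDistCpTuning {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Ends up as "-m 16" on the DistCp invocation built by MapReduceBackupCopyJob.
        conf.setInt(MapReduceBackupCopyJob.BACKUP_COPY_OPTION_PREFIX + CONF_LABEL_MAX_MAPS, 16);
        // Boolean switches are appended only when true, producing "-direct" here.
        conf.setBoolean(
          MapReduceBackupCopyJob.BACKUP_COPY_OPTION_PREFIX + CONF_LABEL_DIRECT_WRITE, true);
        return conf;
      }
    }
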
+ */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import static org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob.BACKUP_COPY_OPTION_PREFIX; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_DIRECT_WRITE; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_MAX_MAPS; +import static org.junit.Assert.assertEquals; + +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + +@Category(SmallTests.class) +public class TestMapReduceBackupCopyJob { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMapReduceBackupCopyJob.class); + + @Test + public void testDistCpOptionParsing() { + Configuration conf = new Configuration(); + conf.setInt(BACKUP_COPY_OPTION_PREFIX + CONF_LABEL_MAX_MAPS, 1000); + conf.setBoolean(BACKUP_COPY_OPTION_PREFIX + CONF_LABEL_DIRECT_WRITE, true); + List args = MapReduceBackupCopyJob.parseDistCpOptions(conf); + + List expectedArgs = + ImmutableList. builder().add("-m", "1000").add("-direct").build(); + + assertEquals(args, expectedArgs); + } + +} From aeeb8556dbe4434695fd8ef2b4d89a84e616e201 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 19 Jul 2024 00:03:19 +0800 Subject: [PATCH 471/514] HBASE-28740 Need to call parent class's serialization methods in CloseExcessRegionReplicasProcedure (#6090) Signed-off-by: Andrew Purtell Signed-off-by: Pankaj Kumar --- .../master/procedure/CloseExcessRegionReplicasProcedure.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java index 61e7c0f86075..bb5da2cc48e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java @@ -142,6 +142,7 @@ protected CloseExcessRegionReplicasProcedureState getInitialState() { @Override protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); CloseExcessRegionReplicasProcedureStateData data = CloseExcessRegionReplicasProcedureStateData .newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) .setNewReplicaCount(newReplicaCount).build(); @@ -150,6 +151,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO @Override protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); CloseExcessRegionReplicasProcedureStateData data = serializer.deserialize(CloseExcessRegionReplicasProcedureStateData.class); tableName = ProtobufUtil.toTableName(data.getTableName()); From b7d11a74451d7b5a132445ad75b4463e8ca0f3c5 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Fri, 19 Jul 2024 06:46:36 -0400 Subject: [PATCH 472/514] HBASE-28716 Users of QuotaRetriever should pass an existing connection (#6065) Signed-off-by: Nick Dimiduk Signed-off-by: Pankaj Kumar --- .../hadoop/hbase/quotas/QuotaRetriever.java | 31 +++++++++---- 
.../hbase/quotas/QuotaObserverChore.java | 3 +- .../quotas/SnapshotQuotaObserverChore.java | 4 +- .../resources/hbase-webapps/master/quotas.jsp | 3 +- .../quotas/SpaceQuotaHelperForTests.java | 18 ++------ .../quotas/TestMasterQuotasObserver.java | 26 +++++------ .../hadoop/hbase/quotas/TestQuotaAdmin.java | 43 +++++-------------- 7 files changed, 56 insertions(+), 72 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index 1dd5bf275bba..1d902ff9b718 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -55,19 +55,29 @@ public class QuotaRetriever implements Closeable, Iterable { /** * Should QutoaRetriever manage the state of the connection, or leave it be. */ - private boolean isManagedConnection = false; + private final boolean isManagedConnection; - QuotaRetriever() { + public QuotaRetriever(final Connection conn) throws IOException { + this(conn, (QuotaFilter) null); } - void init(final Configuration conf, final Scan scan) throws IOException { + public QuotaRetriever(final Connection conn, final QuotaFilter filter) throws IOException { + this(conn, QuotaTableUtil.makeScan(filter)); + } + + public QuotaRetriever(final Connection conn, final Scan scan) throws IOException { + isManagedConnection = false; + init(conn, scan); + } + + QuotaRetriever(final Configuration conf, final Scan scan) throws IOException { // Set this before creating the connection and passing it down to make sure // it's cleaned up if we fail to construct the Scanner. - this.isManagedConnection = true; + isManagedConnection = true; init(ConnectionFactory.createConnection(conf), scan); } - void init(final Connection conn, final Scan scan) throws IOException { + private void init(final Connection conn, final Scan scan) throws IOException { this.connection = Objects.requireNonNull(conn); this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME); try { @@ -159,7 +169,10 @@ public void remove() { * @param conf Configuration object to use. * @return the QuotaRetriever * @throws IOException if a remote or network exception occurs + * @deprecated Since 3.0.0, will be removed in 4.0.0. Use + * {@link #QuotaRetriever(Configuration, Scan)} instead. */ + @Deprecated public static QuotaRetriever open(final Configuration conf) throws IOException { return open(conf, null); } @@ -170,12 +183,14 @@ public static QuotaRetriever open(final Configuration conf) throws IOException { * @param filter the QuotaFilter * @return the QuotaRetriever * @throws IOException if a remote or network exception occurs + * @deprecated Since 3.0.0, will be removed in 4.0.0. Use + * {@link #QuotaRetriever(Configuration, Scan)} instead. 
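
In practice the new constructors mean a caller hands in a Connection it already owns instead of every retriever opening and closing its own, and closing the retriever no longer tears the connection down. A hedged usage sketch (the table name and filter are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaFilter;
    import org.apache.hadoop.hbase.quotas.QuotaRetriever;
    import org.apache.hadoop.hbase.quotas.QuotaSettings;

    public class ListTableQuotas {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
          QuotaRetriever scanner =
            new QuotaRetriever(conn, new QuotaFilter().setTableFilter("my_table"))) {
          // The retriever only borrows conn; this try block happens to close both, but the
          // connection could just as well be a long-lived one shared across the application.
          for (QuotaSettings settings : scanner) {
            System.out.println(settings);
          }
        }
      }
    }
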
*/ + @Deprecated public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter) throws IOException { Scan scan = QuotaTableUtil.makeScan(filter); - QuotaRetriever scanner = new QuotaRetriever(); - scanner.init(conf, scan); - return scanner; + return new QuotaRetriever(conf, scan); } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java index 32bfcebcb9e4..a89db895cb4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java @@ -475,8 +475,7 @@ void pruneOldRegionReports() { TablesWithQuotas fetchAllTablesWithQuotasDefined() throws IOException { final Scan scan = QuotaTableUtil.makeScan(null); final TablesWithQuotas tablesWithQuotas = new TablesWithQuotas(conn, conf); - try (final QuotaRetriever scanner = new QuotaRetriever()) { - scanner.init(conn, scan); + try (final QuotaRetriever scanner = new QuotaRetriever(conn, scan)) { for (QuotaSettings quotaSettings : scanner) { // Only one of namespace and tablename should be 'null' final String namespace = quotaSettings.getNamespace(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java index cfdcb52db71c..c198f3d9d322 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java @@ -166,9 +166,9 @@ Multimap getSnapshotsToComputeSize() throws IOException { Set tablesToFetchSnapshotsFrom = new HashSet<>(); QuotaFilter filter = new QuotaFilter(); filter.addTypeFilter(QuotaType.SPACE); - try (Admin admin = conn.getAdmin()) { + try (Admin admin = conn.getAdmin(); QuotaRetriever qr = new QuotaRetriever(conn, filter)) { // Pull all of the tables that have quotas (direct, or from namespace) - for (QuotaSettings qs : QuotaRetriever.open(conf, filter)) { + for (QuotaSettings qs : qr) { if (qs.getQuotaType() == QuotaType.SPACE) { String ns = qs.getNamespace(); TableName tn = qs.getTableName(); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/quotas.jsp b/hbase-server/src/main/resources/hbase-webapps/master/quotas.jsp index 780a8d4b3605..a52085b529f9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/quotas.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/quotas.jsp @@ -30,7 +30,6 @@ %> <% HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); - Configuration conf = master.getConfiguration(); pageContext.setAttribute("pageTitle", "HBase Master Quotas: " + master.getServerName()); List regionServerThrottles = new ArrayList<>(); List namespaceThrottles = new ArrayList<>(); @@ -39,7 +38,7 @@ boolean exceedThrottleQuotaEnabled = false; if (quotaManager != null) { exceedThrottleQuotaEnabled = quotaManager.isExceedThrottleQuotaEnabled(); - try (QuotaRetriever scanner = QuotaRetriever.open(conf, null)) { + try (QuotaRetriever scanner = new QuotaRetriever(master.getConnection())) { for (QuotaSettings quota : scanner) { if (quota instanceof ThrottleSettings) { ThrottleSettings throttle = (ThrottleSettings) quota; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java index 0dda78d26d34..5c29748bf143 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java @@ -120,13 +120,8 @@ static void updateConfigForQuotas(Configuration conf) { * Returns the number of quotas defined in the HBase quota table. */ long listNumDefinedQuotas(Connection conn) throws IOException { - QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration()); - try { + try (QuotaRetriever scanner = new QuotaRetriever(conn)) { return Iterables.size(scanner); - } finally { - if (scanner != null) { - scanner.close(); - } } } @@ -353,8 +348,7 @@ void removeAllQuotas(Connection conn) throws IOException { waitForQuotaTable(conn); } else { // Or, clean up any quotas from previous test runs. - QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration()); - try { + try (QuotaRetriever scanner = new QuotaRetriever(conn);) { for (QuotaSettings quotaSettings : scanner) { final String namespace = quotaSettings.getNamespace(); final TableName tableName = quotaSettings.getTableName(); @@ -370,17 +364,13 @@ void removeAllQuotas(Connection conn) throws IOException { QuotaUtil.deleteUserQuota(conn, userName); } } - } finally { - if (scanner != null) { - scanner.close(); - } } } } QuotaSettings getTableSpaceQuota(Connection conn, TableName tn) throws IOException { - try (QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration(), - new QuotaFilter().setTableFilter(tn.getNameAsString()))) { + try (QuotaRetriever scanner = + new QuotaRetriever(conn, new QuotaFilter().setTableFilter(tn.getNameAsString()))) { for (QuotaSettings setting : scanner) { if (setting.getTableName().equals(tn) && setting.getQuotaType() == QuotaType.SPACE) { return setting; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java index 0f01a43355b0..a5d879ce77e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java @@ -327,25 +327,27 @@ public boolean namespaceExists(String ns) throws IOException { } public int getNumSpaceQuotas() throws Exception { - QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration()); - int numSpaceQuotas = 0; - for (QuotaSettings quotaSettings : scanner) { - if (quotaSettings.getQuotaType() == QuotaType.SPACE) { - numSpaceQuotas++; + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection())) { + int numSpaceQuotas = 0; + for (QuotaSettings quotaSettings : scanner) { + if (quotaSettings.getQuotaType() == QuotaType.SPACE) { + numSpaceQuotas++; + } } + return numSpaceQuotas; } - return numSpaceQuotas; } public int getThrottleQuotas() throws Exception { - QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration()); - int throttleQuotas = 0; - for (QuotaSettings quotaSettings : scanner) { - if (quotaSettings.getQuotaType() == QuotaType.THROTTLE) { - throttleQuotas++; + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection())) { + int throttleQuotas = 0; + for (QuotaSettings quotaSettings : scanner) { + if (quotaSettings.getQuotaType() == QuotaType.THROTTLE) { + throttleQuotas++; + } } + return throttleQuotas; } - return 
throttleQuotas; } private void createTable(Admin admin, TableName tn) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java index 817f135f0c95..cd266fa0baac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java @@ -123,7 +123,7 @@ public void testThrottleType() throws Exception { QuotaSettingsFactory.throttleUser(userName, ThrottleType.WRITE_NUMBER, 12, TimeUnit.MINUTES)); admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true)); - try (QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration())) { + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection())) { int countThrottle = 0; int countGlobalBypass = 0; for (QuotaSettings settings : scanner) { @@ -169,7 +169,7 @@ public void testSimpleScan() throws Exception { TimeUnit.MINUTES)); admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true)); - try (QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration())) { + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection())) { int countThrottle = 0; int countGlobalBypass = 0; for (QuotaSettings settings : scanner) { @@ -345,11 +345,8 @@ public void testSetGetRemoveSpaceQuota() throws Exception { } // Verify we can retrieve it via the QuotaRetriever API - QuotaRetriever scanner = QuotaRetriever.open(admin.getConfiguration()); - try { + try (QuotaRetriever scanner = new QuotaRetriever(admin.getConnection())) { assertSpaceQuota(sizeLimit, violationPolicy, Iterables.getOnlyElement(scanner)); - } finally { - scanner.close(); } // Now, remove the quota @@ -367,11 +364,8 @@ public void testSetGetRemoveSpaceQuota() throws Exception { } // Verify that we can also not fetch it via the API - scanner = QuotaRetriever.open(admin.getConfiguration()); - try { + try (QuotaRetriever scanner = new QuotaRetriever(admin.getConnection())) { assertNull("Did not expect to find a quota entry", scanner.next()); - } finally { - scanner.close(); } } @@ -399,11 +393,8 @@ public void testSetModifyRemoveSpaceQuota() throws Exception { } // Verify we can retrieve it via the QuotaRetriever API - QuotaRetriever quotaScanner = QuotaRetriever.open(admin.getConfiguration()); - try { + try (QuotaRetriever quotaScanner = new QuotaRetriever(admin.getConnection())) { assertSpaceQuota(originalSizeLimit, violationPolicy, Iterables.getOnlyElement(quotaScanner)); - } finally { - quotaScanner.close(); } // Setting a new size and policy should be reflected @@ -427,11 +418,8 @@ public void testSetModifyRemoveSpaceQuota() throws Exception { } // Verify we can retrieve the new quota via the QuotaRetriever API - quotaScanner = QuotaRetriever.open(admin.getConfiguration()); - try { + try (QuotaRetriever quotaScanner = new QuotaRetriever(admin.getConnection())) { assertSpaceQuota(newSizeLimit, newViolationPolicy, Iterables.getOnlyElement(quotaScanner)); - } finally { - quotaScanner.close(); } // Now, remove the quota @@ -449,11 +437,8 @@ public void testSetModifyRemoveSpaceQuota() throws Exception { } // Verify that we can also not fetch it via the API - quotaScanner = QuotaRetriever.open(admin.getConfiguration()); - try { + try (QuotaRetriever quotaScanner = new QuotaRetriever(admin.getConnection())) { assertNull("Did not expect to find a quota entry", quotaScanner.next()); - } finally { - 
quotaScanner.close(); } } @@ -549,8 +534,7 @@ public void testSetAndRemoveRegionServerQuota() throws Exception { admin.setQuota(QuotaSettingsFactory.throttleRegionServer(regionServer, ThrottleType.READ_NUMBER, 30, TimeUnit.SECONDS)); int count = 0; - QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), rsFilter); - try { + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection(), rsFilter)) { for (QuotaSettings settings : scanner) { assertTrue(settings.getQuotaType() == QuotaType.THROTTLE); ThrottleSettings throttleSettings = (ThrottleSettings) settings; @@ -564,8 +548,6 @@ public void testSetAndRemoveRegionServerQuota() throws Exception { assertEquals(TimeUnit.SECONDS, throttleSettings.getTimeUnit()); } } - } finally { - scanner.close(); } assertEquals(2, count); @@ -733,14 +715,14 @@ private void verifyRecordNotPresentInQuotaTable() throws Exception { private void verifyFetchableViaAPI(Admin admin, ThrottleType type, long limit, TimeUnit tu) throws Exception { // Verify we can retrieve the new quota via the QuotaRetriever API - try (QuotaRetriever quotaScanner = QuotaRetriever.open(admin.getConfiguration())) { + try (QuotaRetriever quotaScanner = new QuotaRetriever(admin.getConnection())) { assertRPCQuota(type, limit, tu, Iterables.getOnlyElement(quotaScanner)); } } private void verifyNotFetchableViaAPI(Admin admin) throws Exception { // Verify that we can also not fetch it via the API - try (QuotaRetriever quotaScanner = QuotaRetriever.open(admin.getConfiguration())) { + try (QuotaRetriever quotaScanner = new QuotaRetriever(admin.getConnection())) { assertNull("Did not expect to find a quota entry", quotaScanner.next()); } } @@ -830,16 +812,13 @@ private void assertSpaceQuota(long sizeLimit, SpaceViolationPolicy violationPoli } private int countResults(final QuotaFilter filter) throws Exception { - QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), filter); - try { + try (QuotaRetriever scanner = new QuotaRetriever(TEST_UTIL.getConnection(), filter)) { int count = 0; for (QuotaSettings settings : scanner) { LOG.debug(Objects.toString(settings)); count++; } return count; - } finally { - scanner.close(); } } From 171ecf2f40974bc467b78273a270254558111fea Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sat, 20 Jul 2024 00:37:07 +0800 Subject: [PATCH 473/514] HBASE-28702 TestBackupMerge fails 100% of times on flaky dashboard (#6078) Signed-off-by: Duo Zhang Signed-off-by: Bryan Beaudreault --- .../apache/hadoop/hbase/backup/BackupDriver.java | 3 +++ .../apache/hadoop/hbase/backup/BackupInfo.java | 13 +++++++++++++ .../apache/hadoop/hbase/backup/BackupRequest.java | 15 +++++++++++++++ .../hbase/backup/BackupRestoreConstants.java | 8 +++++++- .../hadoop/hbase/backup/impl/BackupAdminImpl.java | 2 +- .../hadoop/hbase/backup/impl/BackupCommands.java | 8 +++++++- .../hadoop/hbase/backup/impl/BackupManager.java | 4 +++- .../hbase/backup/impl/FullTableBackupClient.java | 3 +++ .../hbase/backup/impl/TableBackupClient.java | 3 ++- .../hadoop/hbase/backup/TestBackupBase.java | 9 +++++++-- .../hadoop/hbase/backup/TestBackupMerge.java | 6 +++--- .../hadoop/hbase/snapshot/ExportSnapshot.java | 6 ++++-- 12 files changed, 68 insertions(+), 12 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 547a39c8d623..d55a280b4aa4 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -22,6 +22,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST; @@ -151,6 +153,7 @@ protected void addOptions() { addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC); addOptWithArg(OPTION_LIST, OPTION_BACKUP_LIST_DESC); addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC); + addOptNoArg(OPTION_IGNORECHECKSUM, OPTION_IGNORECHECKSUM_DESC); addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC); addOptWithArg(OPTION_SET, OPTION_SET_DESC); addOptWithArg(OPTION_PATH, OPTION_PATH_DESC); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index fdad0d549830..1fad5b6cfdb1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -164,6 +164,11 @@ public enum BackupPhase { */ private long bandwidth = -1; + /** + * Do not verify checksum between source snapshot and exported snapshot + */ + private boolean noChecksumVerify; + public BackupInfo() { backupTableInfoMap = new HashMap<>(); } @@ -197,6 +202,14 @@ public void setBandwidth(long bandwidth) { this.bandwidth = bandwidth; } + public void setNoChecksumVerify(boolean noChecksumVerify) { + this.noChecksumVerify = noChecksumVerify; + } + + public boolean getNoChecksumVerify() { + return noChecksumVerify; + } + public void setBackupTableInfoMap(Map backupTableInfoMap) { this.backupTableInfoMap = backupTableInfoMap; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java index c9c7a5b61810..aa2d5b44259f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -65,6 +65,11 @@ public Builder withBandwidthPerTasks(int bandwidth) { return this; } + public Builder withNoChecksumVerify(boolean noChecksumVerify) { + request.setNoChecksumVerify(noChecksumVerify); + return this; + } + public Builder withYarnPoolName(String name) { request.setYarnPoolName(name); return this; @@ -81,6 +86,7 @@ public BackupRequest build() { private String targetRootDir; private int totalTasks = -1; private long bandwidth = -1L; + private boolean noChecksumVerify = false; private String backupSetName; private String yarnPoolName; @@ -132,6 +138,15 @@ public long getBandwidth() { return this.bandwidth; } + private BackupRequest setNoChecksumVerify(boolean noChecksumVerify) { + this.noChecksumVerify = noChecksumVerify; + return this; + } + + public boolean getNoChecksumVerify() { + return noChecksumVerify; + } + public String getBackupSetName() { return backupSetName; } diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index 56c454519d81..30a5674eb021 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -62,7 +62,7 @@ public interface BackupRestoreConstants { String OPTION_TABLE = "t"; String OPTION_TABLE_DESC = - "Table name. If specified, only backup images," + " which contain this table will be listed."; + "Table name. If specified, only backup images, which contain this table will be listed."; String OPTION_LIST = "l"; String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated."; @@ -74,6 +74,12 @@ public interface BackupRestoreConstants { String OPTION_WORKERS = "w"; String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute"; + String OPTION_IGNORECHECKSUM = "i"; + String OPTION_IGNORECHECKSUM_DESC = + "Ignore checksum verify between source snapshot and exported snapshot." + + " Especially when the source and target file system types are different," + + " we should use -i option to skip checksum-checks."; + String OPTION_RECORD_NUMBER = "n"; String OPTION_RECORD_NUMBER_DESC = "Number of records of backup history. Default: 10"; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index f500581e9d85..c36b398e5e86 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -581,7 +581,7 @@ public String backupTables(BackupRequest request) throws IOException { request = builder.withBackupType(request.getBackupType()).withTableList(tableList) .withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName()) .withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth()) - .build(); + .withNoChecksumVerify(request.getNoChecksumVerify()).build(); TableBackupClient client; try { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index ce9c5bbe8fae..3bb3ed33f34d 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -22,6 +22,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST; @@ -329,6 +331,8 @@ public void execute() throws IOException { ? 
Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) : -1; + boolean ignoreChecksum = cmdline.hasOption(OPTION_IGNORECHECKSUM); + if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) { String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME); // Set system property value for MR job @@ -341,7 +345,8 @@ public void execute() throws IOException { .withTableList( tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null) .withTargetRootDir(targetBackupDir).withTotalTasks(workers) - .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build(); + .withBandwidthPerTasks(bandwidth).withNoChecksumVerify(ignoreChecksum) + .withBackupSetName(setName).build(); String backupId = admin.backupTables(request); System.out.println("Backup session " + backupId + " finished. Status: SUCCESS"); } catch (IOException e) { @@ -394,6 +399,7 @@ protected void printUsage() { options.addOption(OPTION_TABLE, true, OPTION_TABLE_LIST_DESC); options.addOption(OPTION_YARN_QUEUE_NAME, true, OPTION_YARN_QUEUE_NAME_DESC); options.addOption(OPTION_DEBUG, false, OPTION_DEBUG_DESC); + options.addOption(OPTION_IGNORECHECKSUM, false, OPTION_IGNORECHECKSUM_DESC); HelpFormatter helpFormatter = new HelpFormatter(); helpFormatter.setLeftPadding(2); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index f0c93db4b4c2..41dc300abfaf 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -193,7 +193,8 @@ public void close() { * @throws BackupException exception */ public BackupInfo createBackupInfo(String backupId, BackupType type, List tableList, - String targetRootDir, int workers, long bandwidth) throws BackupException { + String targetRootDir, int workers, long bandwidth, boolean noChecksumVerify) + throws BackupException { if (targetRootDir == null) { throw new BackupException("Wrong backup request parameter: target backup root directory"); } @@ -230,6 +231,7 @@ public BackupInfo createBackupInfo(String backupId, BackupType type, List(backupInfo.getTables()); } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index ed17ef8a1173..86aa0f8bd923 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -395,9 +395,14 @@ Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, i protected BackupRequest createBackupRequest(BackupType type, List tables, String path) { + return createBackupRequest(type, tables, path, false); + } + + protected BackupRequest createBackupRequest(BackupType type, List tables, String path, + boolean noChecksumVerify) { BackupRequest.Builder builder = new BackupRequest.Builder(); - BackupRequest request = - builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build(); + BackupRequest request = builder.withBackupType(type).withTableList(tables) + .withTargetRootDir(path).withNoChecksumVerify(noChecksumVerify).build(); return request; } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 5a6d21dad84f..38204f68e31a 
100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -138,15 +138,15 @@ public void testIncBackupMergeRestoreSeparateFs() throws Exception { BackupAdminImpl client = new BackupAdminImpl(conn); List tables = Lists.newArrayList(table1, table2); - BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, true); String backupIdFull = client.backupTables(request); assertTrue(checkSucceeded(backupIdFull)); - request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true); String backupIdIncMultiple = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple)); - request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true); String backupIdIncMultiple2 = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple2)); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index d10ff7f9b3ea..186289e517ca 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -589,8 +589,10 @@ private void verifyCopyResult(final FileStatus inputStat, final FileStatus outpu errMessage .append(" You can choose file-level checksum validation via " + "-Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes" - + " or filesystems are different.") - .append(" Or you can skip checksum-checks altogether with --no-checksum-verify.\n") + + " or filesystems are different.\n") + .append(" Or you can skip checksum-checks altogether with -no-checksum-verify,") + .append( + " for the table backup scenario, you should use -i option to skip checksum-checks.\n") .append(" (NOTE: By skipping checksums, one runs the risk of " + "masking data-corruption during file-transfer.)\n"); } From 54f6e9192b37f5e9d2280d652801223622f63e54 Mon Sep 17 00:00:00 2001 From: Divneet18 Date: Fri, 19 Jul 2024 17:59:20 -0700 Subject: [PATCH 474/514] HBASE-28745 : Default Zookeeper ConnectionRegistry APIs timeout should be less (#6105) Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/client/ZKConnectionRegistry.java | 2 +- .../apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 8c4bdf4d51c6..ebb43723b8f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -72,7 +72,7 @@ class ZKConnectionRegistry implements ConnectionRegistry { private final Configuration conf; private final int zkRegistryAsyncTimeout; public static final String ZK_REGISTRY_ASYNC_GET_TIMEOUT = "zookeeper.registry.async.get.timeout"; - public static final int DEFAULT_ZK_REGISTRY_ASYNC_GET_TIMEOUT = 60000; // 1 min + public static final int 
DEFAULT_ZK_REGISTRY_ASYNC_GET_TIMEOUT = 10000; // 10 sec // User not used, but for rpc based registry we need it ZKConnectionRegistry(Configuration conf, User ignored) { diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java index 23a8c339cd71..f17b29fecec8 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java @@ -217,9 +217,9 @@ public void testNotCloseZkWhenPending() throws Exception { @Test public void testReadWithTimeout() throws Exception { - assertArrayEquals(DATA, RO_ZK.get(PATH, 60000).get()); - assertEquals(CHILDREN, RO_ZK.exists(PATH, 60000).get().getNumChildren()); - List children = RO_ZK.list(PATH, 60000).get(); + assertArrayEquals(DATA, RO_ZK.get(PATH, 10000).get()); + assertEquals(CHILDREN, RO_ZK.exists(PATH, 10000).get().getNumChildren()); + List children = RO_ZK.list(PATH, 10000).get(); assertEquals(CHILDREN, children.size()); Collections.sort(children); for (int i = 0; i < CHILDREN; i++) { From 2129f7f178a982d1538bae957adaaf7e2e5a0ff3 Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sun, 21 Jul 2024 20:43:51 +0800 Subject: [PATCH 475/514] HBASE-28744 Add a new command-line option for table backup in our ref guide (#6103) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/backup_restore.adoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc index 592faebfc168..25555751cb73 100644 --- a/src/main/asciidoc/_chapters/backup_restore.adoc +++ b/src/main/asciidoc/_chapters/backup_restore.adoc @@ -254,6 +254,10 @@ _-b _:: _-d_:: (Optional) Enables "DEBUG" mode which prints additional logging about the backup creation. +_-i_:: + (Optional) Ignore checksum verify between source snapshot and exported snapshot. Especially when the source and target file system types + are different, we should use -i option to skip checksum-checks. + _-q _:: (Optional) Allows specification of the name of a YARN queue which the MapReduce job to create the backup should be executed in. This option is useful to prevent backup tasks from stealing resources away from other MapReduce jobs of high importance. From b0240ce6ea040f64c8886adb294b1d270f4cb3fa Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Sun, 21 Jul 2024 23:20:13 +0800 Subject: [PATCH 476/514] HBASE-28734 Improve HBase shell snapshot command Doc with TTL option (#6107) Signed-off-by: Duo Zhang --- hbase-shell/src/main/ruby/shell/commands/snapshot.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb index 998449470817..0869571e2ea7 100644 --- a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb +++ b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb @@ -24,6 +24,7 @@ def help Take a snapshot of specified table. 
Examples: hbase> snapshot 'sourceTable', 'snapshotName' + hbase> snapshot 'sourceTable', 'snapshotName', {TTL => 86400} hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true, MAX_FILESIZE => 21474836480} EOF end From 743e8d6a6a1aa16754c1a6691900f5d231b5f8b2 Mon Sep 17 00:00:00 2001 From: Vineet Kumar Maheshwari Date: Mon, 22 Jul 2024 18:44:52 +0530 Subject: [PATCH 477/514] HBASE-28743 Fixes NPE for TableSnapshotScanner by disabling mslab (#6101) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../org/apache/hadoop/hbase/client/TableSnapshotScanner.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index 72242c47558c..c2bc0f08d10e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; @@ -122,6 +123,7 @@ public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, this.scan = scan; this.snapshotAlreadyRestored = snapshotAlreadyRestored; this.fs = rootDir.getFileSystem(conf); + conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false); if (snapshotAlreadyRestored) { this.restoreDir = restoreDir; From 1315871c8caca5ce09518c5468ea257dbc6c79cd Mon Sep 17 00:00:00 2001 From: Xin Sun Date: Tue, 23 Jul 2024 09:57:07 +0800 Subject: [PATCH 478/514] HBASE-28749 Remove the duplicate configurations named hbase.wal.batch.size (#6111) Signed-off-by: Pankaj Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java | 3 --- .../hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 8d4afb322d5a..03f3d2508de8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -102,9 +102,6 @@ public class AsyncFSWAL extends AbstractFSWAL { private static final Logger LOG = LoggerFactory.getLogger(AsyncFSWAL.class); - public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; - public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; - public static final String ASYNC_WAL_USE_SHARED_EVENT_LOOP = "hbase.wal.async.use-shared-event-loop"; public static final boolean DEFAULT_ASYNC_WAL_USE_SHARED_EVENT_LOOP = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java index 97488cdfc098..510814ed1279 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java @@ -137,7 +137,7 @@ public static void setUp() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setClass(AsyncFSWALProvider.WRITER_IMPL, TestAsyncWriter.class, AsyncWriter.class); // set a very small size so we will reach the batch size when writing out a single edit - conf.setLong(AsyncFSWAL.WAL_BATCH_SIZE, 1); + conf.setLong(AbstractFSWAL.WAL_BATCH_SIZE, 1); TN = TableName.valueOf("test"); RI = RegionInfoBuilder.newBuilder(TN).build(); From 73f2710e47557218dcf92cc3e7b1788dfbb57fd2 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Thu, 25 Jul 2024 22:26:36 +0530 Subject: [PATCH 479/514] HBASE-28655 IllegalArgumentException: Illegal bufferSize thrown when hbase.io.compress.zstd.buffersize is not configured (#6114) Signed-off-by: Duo Zhang Reviewed-by: Vineet Kumar Maheshwari --- .../hadoop/hbase/io/compress/aircompressor/ZstdCodec.java | 6 +++--- .../org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java index ba7119d83368..8a9a2b7719f9 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java @@ -159,10 +159,10 @@ public class HadoopZstdDecompressor extends HadoopDecompressor // Package private static int getBufferSize(Configuration conf) { - return conf.getInt(ZSTD_BUFFER_SIZE_KEY, + int size = conf.getInt(ZSTD_BUFFER_SIZE_KEY, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY, - // IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT is 0! We can't allow that. - ZSTD_BUFFER_SIZE_DEFAULT)); + CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT)); + return size > 0 ? size : ZSTD_BUFFER_SIZE_DEFAULT; } } diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index 1c851a61c205..7b97c817aca1 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -127,10 +127,10 @@ static int getLevel(Configuration conf) { } static int getBufferSize(Configuration conf) { - return conf.getInt(ZSTD_BUFFER_SIZE_KEY, + int size = conf.getInt(ZSTD_BUFFER_SIZE_KEY, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY, - // IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT is 0! We can't allow that. - ZSTD_BUFFER_SIZE_DEFAULT)); + CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT)); + return size > 0 ? 
size : ZSTD_BUFFER_SIZE_DEFAULT; } static byte[] getDictionary(final Configuration conf) { From 0506565f74b34d4af555797348c86c5dcd2f8c56 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 24 Jul 2024 15:38:06 -0700 Subject: [PATCH 480/514] HBASE-28755 Update downloads.xml for 2.5.10 Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 02e1b11c2d00..73b8dd8c8bbe 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -95,26 +95,26 @@ under the License. - 2.5.9 + 2.5.10 - 2024/07/15 + 2024/07/24 - 2.5.9 vs 2.5.8 + 2.5.10 vs 2.5.9 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc)
    - hadoop3-bin (sha512 asc)
    - hadoop3-client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc)
    + hadoop3-bin (sha512 asc)
    + hadoop3-client-bin (sha512 asc) stable release From 23d3d3d8cf91df7cf7899674d991a54c6326466d Mon Sep 17 00:00:00 2001 From: Xin Sun Date: Fri, 26 Jul 2024 15:02:03 +0800 Subject: [PATCH 481/514] HBASE-28756 RegionSizeCalculator ignored the size of memstore, which leads Spark miss data (#6120) Signed-off-by: Pankaj Signed-off-by: Duo Zhang --- .../hbase/mapreduce/RegionSizeCalculator.java | 4 +-- .../mapreduce/TestRegionSizeCalculator.java | 31 +++++++++++-------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index cc36ef5deb48..6dc9044dcfb6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -82,8 +82,8 @@ private void init(RegionLocator regionLocator, Admin admin) throws IOException { regionLocator.getName())) { byte[] regionId = regionLoad.getRegionName(); - long regionSizeBytes = - ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; + long regionSizeBytes = ((long) (regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE) + + regionLoad.getMemStoreSize().get(Size.Unit.MEGABYTE))) * MEGABYTE; sizeMap.put(regionId, regionSizeBytes); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 2fda536438a7..583223691da8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -61,19 +61,22 @@ public void testSimpleTestCase() throws Exception { RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3"); - Admin admin = mockAdmin(mockRegion("region1", 123), mockRegion("region3", 1232), - mockRegion("region2", 54321)); + Admin admin = mockAdmin(mockRegion("region1", 123, 321), mockRegion("region3", 1232, 2321), + mockRegion("region2", 54321, 12345), mockRegion("region4", 6789, 0), + mockRegion("region5", 0, 4567)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); - assertEquals(123 * megabyte, calculator.getRegionSize(Bytes.toBytes("region1"))); - assertEquals(54321 * megabyte, calculator.getRegionSize(Bytes.toBytes("region2"))); - assertEquals(1232 * megabyte, calculator.getRegionSize(Bytes.toBytes("region3"))); + assertEquals((123 + 321) * megabyte, calculator.getRegionSize(Bytes.toBytes("region1"))); + assertEquals((54321 + 12345) * megabyte, calculator.getRegionSize(Bytes.toBytes("region2"))); + assertEquals((1232 + 2321) * megabyte, calculator.getRegionSize(Bytes.toBytes("region3"))); + assertEquals(6789 * megabyte, calculator.getRegionSize(Bytes.toBytes("region4"))); + assertEquals(4567 * megabyte, calculator.getRegionSize(Bytes.toBytes("region5"))); // if regionCalculator does not know about a region, it should return 0 assertEquals(0, calculator.getRegionSize(Bytes.toBytes("otherTableRegion"))); - assertEquals(3, calculator.getRegionSizeMap().size()); + assertEquals(5, calculator.getRegionSizeMap().size()); } /** @@ -85,11 +88,11 @@ public void testLargeRegion() throws Exception { RegionLocator regionLocator = mockRegionLocator("largeRegion"); - Admin admin = 
mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE)); + Admin admin = mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE, Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); - assertEquals(((long) Integer.MAX_VALUE) * megabyte, + assertEquals(((long) Integer.MAX_VALUE) * 2L * megabyte, calculator.getRegionSize(Bytes.toBytes("largeRegion"))); } @@ -99,11 +102,11 @@ public void testDisabled() throws Exception { String regionName = "cz.goout:/index.html"; RegionLocator table = mockRegionLocator(regionName); - Admin admin = mockAdmin(mockRegion(regionName, 999)); + Admin admin = mockAdmin(mockRegion(regionName, 999, 888)); // first request on enabled calculator RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); - assertEquals(999 * megabyte, calculator.getRegionSize(Bytes.toBytes(regionName))); + assertEquals((999 + 888) * megabyte, calculator.getRegionSize(Bytes.toBytes(regionName))); // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); @@ -116,7 +119,7 @@ public void testDisabled() throws Exception { public void testRegionWithNullServerName() throws Exception { RegionLocator regionLocator = mockRegionLocator(null, Collections.singletonList("someBigRegion")); - Admin admin = mockAdmin(mockRegion("someBigRegion", Integer.MAX_VALUE)); + Admin admin = mockAdmin(mockRegion("someBigRegion", Integer.MAX_VALUE, Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); assertEquals(0, calculator.getRegionSize(Bytes.toBytes("someBigRegion"))); } @@ -158,13 +161,15 @@ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { /** * Creates mock of region with given name and size. 
- * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + * @param memStoreSize number of megabytes occupied by region in memstore in megabytes */ - private RegionMetrics mockRegion(String regionName, int fileSizeMb) { + private RegionMetrics mockRegion(String regionName, int fileSizeMb, int memStoreSize) { RegionMetrics region = Mockito.mock(RegionMetrics.class); when(region.getRegionName()).thenReturn(Bytes.toBytes(regionName)); when(region.getNameAsString()).thenReturn(regionName); when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, Size.Unit.MEGABYTE)); + when(region.getMemStoreSize()).thenReturn(new Size(memStoreSize, Size.Unit.MEGABYTE)); return region; } } From 6d89c633ff7067c82bc7ed8b00e0f3d4be6cd9ff Mon Sep 17 00:00:00 2001 From: lupeng Date: Sat, 27 Jul 2024 14:49:54 +0800 Subject: [PATCH 482/514] HBASE-28753 FNFE may occur when accessing the region.jsp of the replica region (#6117) Signed-off-by: Duo Zhang Signed-off-by: Pankaj Kumar --- .../hbase-webapps/regionserver/region.jsp | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp index 3eeab8eb341c..9cb432b326a2 100644 --- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp @@ -22,27 +22,31 @@ import="java.util.Collection" import="java.util.Date" import="java.util.List" + import="org.apache.hadoop.fs.FileSystem" import="org.apache.hadoop.fs.FileStatus" import="org.apache.hadoop.fs.Path" import="org.apache.hadoop.hbase.HConstants" + import="org.apache.hadoop.hbase.client.RegionInfo" import="org.apache.hadoop.hbase.client.RegionInfoDisplay" import="org.apache.hadoop.hbase.mob.MobUtils" import="org.apache.hadoop.hbase.regionserver.HRegionServer" import="org.apache.hadoop.hbase.regionserver.HMobStore" import="org.apache.hadoop.hbase.regionserver.HStoreFile" - import="org.apache.hadoop.hbase.regionserver.Region" - import="org.apache.hadoop.hbase.regionserver.Store" - import="org.apache.hadoop.hbase.regionserver.StoreFile" + import="org.apache.hadoop.hbase.regionserver.HRegion" + import="org.apache.hadoop.hbase.regionserver.HStore" %> <% String regionName = request.getParameter("name"); HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); + FileSystem fs = rs.getFileSystem(); - Region region = rs.getRegion(regionName); + HRegion region = rs.getRegion(regionName); String displayName; + boolean isReplicaRegion = false; if (region != null) { displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(region.getRegionInfo(), rs.getConfiguration()); + isReplicaRegion = region.getRegionInfo().getReplicaId() > RegionInfo.DEFAULT_REPLICA_ID; } else { displayName = "region {" + regionName + "} is not currently online on this region server"; } @@ -59,11 +63,11 @@ -<% if(region != null) { // - List stores = region.getStores(); - for (Store store : stores) { +<% if(region != null) { + List stores = region.getStores(); + for (HStore store : stores) { String cf = store.getColumnFamilyName(); - Collection storeFiles = store.getStorefiles(); %> + Collection storeFiles = store.getStorefiles(); %>

    Column Family: <%= cf %>

    @@ -79,17 +83,20 @@ Len Of Biggest Cell Key Of Biggest Cell - <% for(StoreFile sf : storeFiles) { %> + <% int count = 0; + for(HStoreFile sf : storeFiles) { + if (isReplicaRegion && !fs.exists(sf.getPath())) continue; + count ++; %> <%= sf.getPath() %> - <%= (int) (rs.getFileSystem().getLength(sf.getPath()) / 1024 / 1024) %> + <%= (int) (fs.getLength(sf.getPath()) / 1024 / 1024) %> <%= new Date(sf.getModificationTimestamp()) %> - <%= String.format("%,1d", ((HStoreFile)sf).getFileInfo().getHFileInfo().getLenOfBiggestCell()) %> - <%= ((HStoreFile)sf).getFileInfo().getHFileInfo().getKeyOfBiggestCell() %> + <%= String.format("%,1d", sf.getFileInfo().getHFileInfo().getLenOfBiggestCell()) %> + <%= sf.getFileInfo().getHFileInfo().getKeyOfBiggestCell() %> <% } %> -

    <%= storeFiles.size() %> StoreFile(s) in set.

    +

    <%= count %> StoreFile(s) in set. <%= isReplicaRegion ? "The information about StoreFile(s) may not be up-to-date because it's not the primary region." : "" %>

    <% if (store instanceof HMobStore) { %> @@ -103,17 +110,18 @@ <% int mobCnt = 0; - for (StoreFile sf : storeFiles) { + for (HStoreFile sf : storeFiles) { try { - byte[] value = ((HStoreFile)sf).getMetadataValue(HStoreFile.MOB_FILE_REFS); + byte[] value = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS); if (value == null) { continue; } Collection fileNames = MobUtils.deserializeMobFileRefs(value).build().values(); - mobCnt += fileNames.size(); for (String fileName : fileNames) { Path mobPath = new Path(((HMobStore) store).getPath(), fileName); + if (isReplicaRegion && !fs.exists(mobPath)) continue; + mobCnt ++; FileStatus status = rs.getFileSystem().getFileStatus(mobPath); String mobPathStr = mobPath.toString(); String encodedStr = URLEncoder.encode(mobPathStr, HConstants.UTF8_ENCODING); %> @@ -132,7 +140,7 @@ <% } } %> -

    <%= mobCnt %> MobFile(s) in set.

    +

    <%= mobCnt %> MobFile(s) in set. <%= isReplicaRegion ? "The information about MobFile(s) may not be up-to-date because it's not the primary region." : "" %>

    <% } } From 43b1d78e2040a97e270483f544eafd799f106783 Mon Sep 17 00:00:00 2001 From: Vineet Kumar Maheshwari Date: Sat, 27 Jul 2024 12:26:48 +0530 Subject: [PATCH 483/514] HBASE-28742 Fixes NPE for CompactionTool when mslab enabled (#6097) Signed-off-by: Duo Zhang Signed-off-by: Pankaj Kumar --- .../hbase/regionserver/CompactionTool.java | 4 + .../TestCompactionToolNpeFix.java | 141 ++++++++++++++++++ 2 files changed, 145 insertions(+) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 9f2db27466cb..3034da69c6a8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -211,6 +211,8 @@ public void setup(Context context) { } catch (IOException e) { throw new RuntimeException("Could not get the input FileSystem", e); } + // Disable the MemStoreLAB as MemStore is not used by flow during compaction + conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false); } @Override @@ -369,6 +371,8 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, */ private int doClient(final FileSystem fs, final Set toCompactDirs, final boolean compactOnce, final boolean major) throws IOException { + // Disable the MemStoreLAB as MemStore is not used by flow during compaction + getConf().setBoolean(MemStoreLAB.USEMSLAB_KEY, false); CompactionWorker worker = new CompactionWorker(fs, getConf()); for (Path path : toCompactDirs) { worker.compact(path, compactOnce, major); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java new file mode 100644 index 000000000000..b230fd6c4d93 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.util.ToolRunner; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MediumTests.class, RegionServerTests.class }) +public class TestCompactionToolNpeFix { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCompactionToolNpeFix.class); + + private static final HBaseTestingUtil TESTUTIL = new HBaseTestingUtil(); + + private HRegion region; + private final static byte[] qualifier = Bytes.toBytes("qf"); + private static Path rootDir; + private final TableName tableName = TableName.valueOf(getClass().getSimpleName()); + + @BeforeClass + public static void setUpAfterClass() throws Exception { + TESTUTIL.getConfiguration().setBoolean(MemStoreLAB.USEMSLAB_KEY, false); + TESTUTIL.startMiniCluster(); + rootDir = TESTUTIL.getDefaultRootDirPath(); + TESTUTIL.startMiniMapReduceCluster(); + + } + + @AfterClass + public static void tearDown() throws Exception { + TESTUTIL.shutdownMiniMapReduceCluster(); + TESTUTIL.shutdownMiniCluster(); + TESTUTIL.cleanupTestDir(); + + } + + @Before + public void setUp() throws IOException { + TESTUTIL.createTable(tableName, HBaseTestingUtil.fam1); + this.region = TESTUTIL.getMiniHBaseCluster().getRegions(tableName).get(0); + } + + @After + public void after() throws IOException { + TESTUTIL.deleteTable(tableName); + } + + private void putAndFlush(int key) throws Exception { + Put put = new Put(Bytes.toBytes(key)); + put.addColumn(HBaseTestingUtil.fam1, qualifier, Bytes.toBytes("val" + key)); + region.put(put); + TESTUTIL.flush(tableName); + } + + private HStore prepareStoreWithMultiFiles() throws Exception { + for (int i = 0; i < 5; i++) { + this.putAndFlush(i); + } + HStore store = region.getStore(HBaseTestingUtil.fam1); + assertEquals(5, store.getStorefilesCount()); + return store; + } + + @Test + public void testCompactedFilesArchived() throws Exception { + HStore store = prepareStoreWithMultiFiles(); + Path tableDir = CommonFSUtils.getTableDir(rootDir, region.getRegionInfo().getTable()); + FileSystem fs = store.getFileSystem(); + String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/" + + Bytes.toString(HBaseTestingUtil.fam1); + FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath)); + assertEquals(5, regionDirFiles.length); + String defaultFS = TESTUTIL.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS"); + Configuration config = HBaseConfiguration.create(); + config.set("fs.defaultFS", defaultFS); + int result = ToolRunner.run(config, new CompactionTool(), + new String[] { "-compactOnce", "-major", storePath }); + 
assertEquals(0, result); + regionDirFiles = fs.listStatus(new Path(storePath)); + assertEquals(1, regionDirFiles.length); + } + + @Test + public void testCompactedFilesArchivedMapRed() throws Exception { + HStore store = prepareStoreWithMultiFiles(); + Path tableDir = CommonFSUtils.getTableDir(rootDir, region.getRegionInfo().getTable()); + FileSystem fs = store.getFileSystem(); + String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/" + + Bytes.toString(HBaseTestingUtil.fam1); + FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath)); + assertEquals(5, regionDirFiles.length); + String defaultFS = TESTUTIL.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS"); + Configuration config = HBaseConfiguration.create(TESTUTIL.getConfiguration()); + config.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); + config.set("fs.defaultFS", defaultFS); + int result = ToolRunner.run(config, new CompactionTool(), + new String[] { "-compactOnce", "-mapred", "-major", storePath }); + assertEquals(0, result); + regionDirFiles = fs.listStatus(new Path(storePath)); + assertEquals(1, regionDirFiles.length); + } +} From 634b200e884725357b610730cf029beb79dce9fd Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 27 Jul 2024 22:26:15 +0800 Subject: [PATCH 484/514] HBASE-28522 UNASSIGN proc indefinitely stuck on dead rs (#5995) Signed-off-by: Viraj Jasani Reviewed-by: Ray Mattingly --- .../server/master/MasterProcedure.proto | 9 ++ .../master/assignment/AssignmentManager.java | 67 ++++++-- .../AbstractCloseTableRegionsProcedure.java | 147 ++++++++++++++++++ .../CloseExcessRegionReplicasProcedure.java | 106 +++---------- .../procedure/CloseTableRegionsProcedure.java | 91 +++++++++++ .../procedure/DisableTableProcedure.java | 11 +- .../procedure/ServerCrashProcedure.java | 14 -- .../TestRaceBetweenSCPAndDTP.java | 114 +++++++------- 8 files changed, 375 insertions(+), 184 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractCloseTableRegionsProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseTableRegionsProcedure.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/master/{assignment => procedure}/TestRaceBetweenSCPAndDTP.java (57%) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 9161a02c1800..81d16b2861ca 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -811,3 +811,12 @@ message CloseExcessRegionReplicasProcedureStateData { required TableName table_name = 1; required uint32 new_replica_count = 2; } + +enum CloseTableRegionsProcedureState { + CLOSE_TABLE_REGIONS_SCHEDULE = 1; + CLOSE_TABLE_REGIONS_CONFIRM = 2; +} + +message CloseTableRegionsProcedureStateData { + required TableName table_name = 1; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index bcfa50fe66d5..d05b1f9d3d34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -34,6 +34,7 @@ import java.util.concurrent.locks.Condition; import 
java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; @@ -1084,25 +1085,16 @@ public TransitRegionStateProcedure[] createUnassignProceduresForDisabling(TableN .toArray(TransitRegionStateProcedure[]::new); } - /** - * Called by ModifyTableProcedure to unassign all the excess region replicas for a table. Will - * skip submit unassign procedure if the region is in transition, so you may need to call this - * method multiple times. - * @param tableName the table for closing excess region replicas - * @param newReplicaCount the new replica count, should be less than current replica count - * @param submit for submitting procedure - * @return the number of regions in transition that we can not schedule unassign procedures - */ - public int submitUnassignProcedureForClosingExcessRegionReplicas(TableName tableName, - int newReplicaCount, Consumer submit) { + private int submitUnassignProcedure(TableName tableName, + Function shouldSubmit, Consumer logRIT, + Consumer submit) { int inTransitionCount = 0; for (RegionStateNode regionNode : regionStates.getTableRegionStateNodes(tableName)) { regionNode.lock(); try { - if (regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) { + if (shouldSubmit.apply(regionNode)) { if (regionNode.isInTransition()) { - LOG.debug("skip scheduling unassign procedure for {} when closing excess region " - + "replicas since it is in transition", regionNode); + logRIT.accept(regionNode); inTransitionCount++; continue; } @@ -1119,12 +1111,46 @@ public int submitUnassignProcedureForClosingExcessRegionReplicas(TableName table return inTransitionCount; } - public int numberOfUnclosedExcessRegionReplicas(TableName tableName, int newReplicaCount) { + /** + * Called by DsiableTableProcedure to unassign all regions for a table. Will skip submit unassign + * procedure if the region is in transition, so you may need to call this method multiple times. + * @param tableName the table for closing excess region replicas + * @param submit for submitting procedure + * @return the number of regions in transition that we can not schedule unassign procedures + */ + public int submitUnassignProcedureForDisablingTable(TableName tableName, + Consumer submit) { + return submitUnassignProcedure(tableName, rn -> true, + rn -> LOG.debug("skip scheduling unassign procedure for {} when closing table regions " + + "for disabling since it is in transition", rn), + submit); + } + + /** + * Called by ModifyTableProcedure to unassign all the excess region replicas for a table. Will + * skip submit unassign procedure if the region is in transition, so you may need to call this + * method multiple times. 
+ * @param tableName the table for closing excess region replicas + * @param newReplicaCount the new replica count, should be less than current replica count + * @param submit for submitting procedure + * @return the number of regions in transition that we can not schedule unassign procedures + */ + public int submitUnassignProcedureForClosingExcessRegionReplicas(TableName tableName, + int newReplicaCount, Consumer submit) { + return submitUnassignProcedure(tableName, + rn -> rn.getRegionInfo().getReplicaId() >= newReplicaCount, + rn -> LOG.debug("skip scheduling unassign procedure for {} when closing excess region " + + "replicas since it is in transition", rn), + submit); + } + + private int numberOfUnclosedRegions(TableName tableName, + Function shouldSubmit) { int unclosed = 0; for (RegionStateNode regionNode : regionStates.getTableRegionStateNodes(tableName)) { regionNode.lock(); try { - if (regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) { + if (shouldSubmit.apply(regionNode)) { if (!regionNode.isInState(State.OFFLINE, State.CLOSED, State.SPLIT)) { unclosed++; } @@ -1136,6 +1162,15 @@ public int numberOfUnclosedExcessRegionReplicas(TableName tableName, int newRepl return unclosed; } + public int numberOfUnclosedRegionsForDisabling(TableName tableName) { + return numberOfUnclosedRegions(tableName, rn -> true); + } + + public int numberOfUnclosedExcessRegionReplicas(TableName tableName, int newReplicaCount) { + return numberOfUnclosedRegions(tableName, + rn -> rn.getRegionInfo().getReplicaId() >= newReplicaCount); + } + public SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToSplit, final byte[] splitKey) throws IOException { return new SplitTableRegionProcedure(getProcedureEnvironment(), regionToSplit, splitKey); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractCloseTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractCloseTableRegionsProcedure.java new file mode 100644 index 000000000000..bfee7dbf5b07 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractCloseTableRegionsProcedure.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.function.Consumer; +import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; + +/** + * Base class for unassigning table regions. + */ +@InterfaceAudience.Private +public abstract class AbstractCloseTableRegionsProcedure> + extends AbstractStateMachineTableProcedure { + + private static final Logger LOG = + LoggerFactory.getLogger(AbstractCloseTableRegionsProcedure.class); + + protected TableName tableName; + + private RetryCounter retryCounter; + + protected AbstractCloseTableRegionsProcedure() { + } + + protected AbstractCloseTableRegionsProcedure(TableName tableName) { + this.tableName = tableName; + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.REGION_EDIT; + } + + private Flow schedule(MasterProcedureEnv env) throws ProcedureSuspendedException { + MutableBoolean submitted = new MutableBoolean(false); + int inTransitionCount = submitUnassignProcedure(env, p -> { + submitted.setTrue(); + addChildProcedure(p); + }); + if (inTransitionCount > 0 && submitted.isFalse()) { + // we haven't scheduled any unassign procedures and there are still regions in + // transition, sleep for a while and try again + if (retryCounter == null) { + retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + } + long backoffMillis = retryCounter.getBackoffTimeAndIncrementAttempts(); + LOG.info( + "There are still {} region(s) in transition for closing regions of table {}" + + " when executing {}, suspend {}secs and try again later", + inTransitionCount, tableName, getClass().getSimpleName(), backoffMillis / 1000); + suspend((int) backoffMillis, true); + } + setNextState(getConfirmState()); + return Flow.HAS_MORE_STATE; + } + + private Flow confirm(MasterProcedureEnv env) { + int unclosedCount = numberOfUnclosedRegions(env); + if (unclosedCount > 0) { + LOG.info( + "There are still {} unclosed region(s) for closing regions of table {}" + + " when executing {}, continue...", + unclosedCount, tableName, getClass().getSimpleName()); + setNextState(getInitialState()); + return Flow.HAS_MORE_STATE; + } else { + return Flow.NO_MORE_STATE; + } + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, TState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.trace("{} execute state={}", this, state); + if (state == getInitialState()) { + return schedule(env); + } else if (state == getConfirmState()) { + return confirm(env); + } else { + throw new UnsupportedOperationException("unhandled state=" + state); + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, TState state) + throws IOException, InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + protected synchronized boolean 
setTimeoutFailure(MasterProcedureEnv env) { + setState(ProcedureProtos.ProcedureState.RUNNABLE); + env.getProcedureScheduler().addFront(this); + return false; + } + + /** + * We have two state for this type of procedures, the initial state for scheduling unassign + * procedures, and the confirm state for checking whether we have unassigned all the regions. + * @return the confirm state + */ + protected abstract TState getConfirmState(); + + /** + * Submit TRSP for unassigning regions. Return the number of regions in RIT state that we can not + * schedule TRSP for them. + */ + protected abstract int submitUnassignProcedure(MasterProcedureEnv env, + Consumer submit); + + /** + * Return the number of uncloses regions. Returning {@code 0} means we are done. + */ + protected abstract int numberOfUnclosedRegions(MasterProcedureEnv env); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java index bb5da2cc48e8..6dd9429aeabf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseExcessRegionReplicasProcedure.java @@ -18,113 +18,33 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; -import org.apache.commons.lang3.mutable.MutableBoolean; +import java.util.function.Consumer; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; -import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; -import org.apache.hadoop.hbase.procedure2.ProcedureUtil; -import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; -import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseExcessRegionReplicasProcedureState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseExcessRegionReplicasProcedureStateData; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** * Procedure for close excess region replicas. 
*/ @InterfaceAudience.Private public class CloseExcessRegionReplicasProcedure - extends AbstractStateMachineTableProcedure { + extends AbstractCloseTableRegionsProcedure { - private static final Logger LOG = - LoggerFactory.getLogger(CloseExcessRegionReplicasProcedure.class); - - private TableName tableName; private int newReplicaCount; - private RetryCounter retryCounter; - public CloseExcessRegionReplicasProcedure() { } public CloseExcessRegionReplicasProcedure(TableName tableName, int newReplicaCount) { - this.tableName = tableName; + super(tableName); this.newReplicaCount = newReplicaCount; } - @Override - public TableName getTableName() { - return tableName; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.REGION_EDIT; - } - - @Override - protected Flow executeFromState(MasterProcedureEnv env, - CloseExcessRegionReplicasProcedureState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - LOG.trace("{} execute state={}", this, state); - switch (state) { - case CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE: - MutableBoolean submitted = new MutableBoolean(false); - int inTransitionCount = env.getAssignmentManager() - .submitUnassignProcedureForClosingExcessRegionReplicas(tableName, newReplicaCount, p -> { - submitted.setTrue(); - addChildProcedure(p); - }); - if (inTransitionCount > 0 && submitted.isFalse()) { - // we haven't scheduled any unassign procedures and there are still regions in - // transition, sleep for a while and try again - if (retryCounter == null) { - retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); - } - long backoffMillis = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.info( - "There are still {} region(s) in transition for table {} when closing excess" - + " region replicas, suspend {}secs and try again later", - inTransitionCount, tableName, backoffMillis / 1000); - suspend((int) backoffMillis, true); - } - setNextState(CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_CONFIRM); - return Flow.HAS_MORE_STATE; - case CLOSE_EXCESS_REGION_REPLICAS_CONFIRM: - int unclosedCount = env.getAssignmentManager() - .numberOfUnclosedExcessRegionReplicas(tableName, newReplicaCount); - if (unclosedCount > 0) { - LOG.info("There are still {} unclosed region(s) for table {} when closing excess" - + " region replicas, continue..."); - setNextState( - CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_SCHEDULE); - return Flow.HAS_MORE_STATE; - } else { - return Flow.NO_MORE_STATE; - } - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } - - @Override - protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { - setState(ProcedureProtos.ProcedureState.RUNNABLE); - env.getProcedureScheduler().addFront(this); - return false; - } - - @Override - protected void rollbackState(MasterProcedureEnv env, - CloseExcessRegionReplicasProcedureState state) throws IOException, InterruptedException { - throw new UnsupportedOperationException(); - } - @Override protected CloseExcessRegionReplicasProcedureState getState(int stateId) { return CloseExcessRegionReplicasProcedureState.forNumber(stateId); @@ -158,4 +78,22 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws newReplicaCount = data.getNewReplicaCount(); } + @Override + protected CloseExcessRegionReplicasProcedureState getConfirmState() { + return 
CloseExcessRegionReplicasProcedureState.CLOSE_EXCESS_REGION_REPLICAS_CONFIRM; + } + + @Override + protected int submitUnassignProcedure(MasterProcedureEnv env, + Consumer submit) { + return env.getAssignmentManager() + .submitUnassignProcedureForClosingExcessRegionReplicas(tableName, newReplicaCount, submit); + } + + @Override + protected int numberOfUnclosedRegions(MasterProcedureEnv env) { + return env.getAssignmentManager().numberOfUnclosedExcessRegionReplicas(tableName, + newReplicaCount); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseTableRegionsProcedure.java new file mode 100644 index 000000000000..0cfe0b41785c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloseTableRegionsProcedure.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.function.Consumer; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseTableRegionsProcedureState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseTableRegionsProcedureStateData; + +/** + * Procedure for closing all regions for a table. 
+ */ +@InterfaceAudience.Private +public class CloseTableRegionsProcedure + extends AbstractCloseTableRegionsProcedure { + + public CloseTableRegionsProcedure() { + } + + public CloseTableRegionsProcedure(TableName tableName) { + super(tableName); + } + + @Override + protected int submitUnassignProcedure(MasterProcedureEnv env, + Consumer submit) { + return env.getAssignmentManager().submitUnassignProcedureForDisablingTable(tableName, submit); + } + + @Override + protected int numberOfUnclosedRegions(MasterProcedureEnv env) { + return env.getAssignmentManager().numberOfUnclosedRegionsForDisabling(tableName); + } + + @Override + protected CloseTableRegionsProcedureState getState(int stateId) { + return CloseTableRegionsProcedureState.forNumber(stateId); + } + + @Override + protected int getStateId(CloseTableRegionsProcedureState state) { + return state.getNumber(); + } + + @Override + protected CloseTableRegionsProcedureState getInitialState() { + return CloseTableRegionsProcedureState.CLOSE_TABLE_REGIONS_SCHEDULE; + } + + @Override + protected CloseTableRegionsProcedureState getConfirmState() { + return CloseTableRegionsProcedureState.CLOSE_TABLE_REGIONS_CONFIRM; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); + CloseTableRegionsProcedureStateData data = CloseTableRegionsProcedureStateData.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + serializer.serialize(data); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); + CloseTableRegionsProcedureStateData data = + serializer.deserialize(CloseTableRegionsProcedureStateData.class); + tableName = ProtobufUtil.toTableName(data.getTableName()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index a5c2a4122eae..e8999b886afd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -102,8 +102,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE); break; case DISABLE_TABLE_MARK_REGIONS_OFFLINE: - addChildProcedure( - env.getAssignmentManager().createUnassignProceduresForDisabling(tableName)); + addChildProcedure(new CloseTableRegionsProcedure(tableName)); setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER); break; case DISABLE_TABLE_ADD_REPLICATION_BARRIER: @@ -214,14 +213,6 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws skipTableStateCheck = disableTableMsg.getSkipTableStateCheck(); } - // For disabling a table, we does not care whether a region can be online so hold the table xlock - // for ever. This will simplify the logic as we will not be conflict with procedures other than - // SCP. 
- @Override - protected boolean holdLock(MasterProcedureEnv env) { - return true; - } - @Override public TableName getTableName() { return tableName; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index cdf13064e24a..3fa5358635ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -614,20 +614,6 @@ private void assignRegions(MasterProcedureEnv env, List regions) }); continue; } - if ( - env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), - TableState.State.DISABLING) - ) { - // We need to change the state here otherwise the TRSP scheduled by DTP will try to - // close the region from a dead server and will never succeed. Please see HBASE-23636 - // for more details. - ProcedureFutureUtil.suspendIfNecessary(this, this::setUpdateMetaFuture, - env.getAssignmentManager().regionClosedAbnormally(regionNode), env, () -> { - }); - LOG.info("{} found table disabling for region {}, set it state to ABNORMALLY_CLOSED.", - this, regionNode); - continue; - } if ( env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), TableState.State.DISABLED) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRaceBetweenSCPAndDTP.java similarity index 57% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRaceBetweenSCPAndDTP.java index e6a40b74e892..075ae8b9506a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRaceBetweenSCPAndDTP.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master.assignment; +package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; -import java.util.Optional; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -28,16 +28,13 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure; -import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; -import org.apache.hadoop.hbase.master.region.MasterRegion; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -47,58 +44,55 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; + /** - * Testcase for HBASE-23636. + * Testcase for HBASE-28522. + *

    + * We used to have a test with the same name but in a different package for HBASE-23636, where DTP will + * hold the exclusive lock all the time, and it will reset TRSPs which have been attached to + * RegionStateNodes, so we need special logic in SCP to deal with it. + *

    + * After HBASE-28522, DTP will not reset TRSPs any more, so SCP does not need to take care of this + * special case, thues we removed the special logic in SCP and also the UT for HBASE-22636 is not + * valid any more, so we just removed the old one and introduce a new one with the same name here. */ @Category({ MasterTests.class, MediumTests.class }) public class TestRaceBetweenSCPAndDTP { - private static final Logger LOG = LoggerFactory.getLogger(TestRaceBetweenSCPAndDTP.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRaceBetweenSCPAndDTP.class); + private static final Logger LOG = LoggerFactory.getLogger(TestRaceBetweenSCPAndDTP.class); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName NAME = TableName.valueOf("Race"); private static byte[] CF = Bytes.toBytes("cf"); - private static CountDownLatch ARRIVE_GET_REGIONS_ON_TABLE; + private static CountDownLatch ARRIVE_GET_REPLICATION_PEER_MANAGER; - private static CountDownLatch RESUME_GET_REGIONS_ON_SERVER; + private static CountDownLatch RESUME_GET_REPLICATION_PEER_MANAGER; - private static final class AssignmentManagerForTest extends AssignmentManager { + public static final class HMasterForTest extends HMaster { - public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) { - super(master, masterRegion); + public HMasterForTest(Configuration conf) throws IOException, KeeperException { + super(conf); } @Override - public TransitRegionStateProcedure[] createUnassignProceduresForDisabling(TableName tableName) { - if (ARRIVE_GET_REGIONS_ON_TABLE != null) { - ARRIVE_GET_REGIONS_ON_TABLE.countDown(); - ARRIVE_GET_REGIONS_ON_TABLE = null; + public ReplicationPeerManager getReplicationPeerManager() { + if (ARRIVE_GET_REPLICATION_PEER_MANAGER != null) { + ARRIVE_GET_REPLICATION_PEER_MANAGER.countDown(); + ARRIVE_GET_REPLICATION_PEER_MANAGER = null; try { - RESUME_GET_REGIONS_ON_SERVER.await(); + RESUME_GET_REPLICATION_PEER_MANAGER.await(); } catch (InterruptedException e) { } } - TransitRegionStateProcedure[] procs = super.createUnassignProceduresForDisabling(tableName); - return procs; - } - } - - public static final class HMasterForTest extends HMaster { - - public HMasterForTest(Configuration conf) throws IOException, KeeperException { - super(conf); - } - - @Override - protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { - return new AssignmentManagerForTest(master, masterRegion); + return super.getReplicationPeerManager(); } } @@ -116,43 +110,43 @@ public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } + private boolean wasExecuted(Procedure proc) { + // RUNNABLE is not enough to make sure that the DTP has acquired the table lock, as we will set + // procedure to RUNNABLE first and then acquire the execution lock + return proc.wasExecuted() || proc.getState() == ProcedureState.WAITING_TIMEOUT + || proc.getState() == ProcedureState.WAITING; + } + @Test - public void test() throws Exception { + public void testRace() throws Exception { RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo(); AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); ServerName sn = am.getRegionStates().getRegionState(region).getServerName(); LOG.info("ServerName={}, region={}", sn, region); - ARRIVE_GET_REGIONS_ON_TABLE = new CountDownLatch(1); - RESUME_GET_REGIONS_ON_SERVER = new CountDownLatch(1); 
+ ARRIVE_GET_REPLICATION_PEER_MANAGER = new CountDownLatch(1); + RESUME_GET_REPLICATION_PEER_MANAGER = new CountDownLatch(1); // Assign to local variable because this static gets set to null in above running thread and // so NPE. - CountDownLatch cdl = ARRIVE_GET_REGIONS_ON_TABLE; - UTIL.getAdmin().disableTableAsync(NAME); + CountDownLatch cdl = ARRIVE_GET_REPLICATION_PEER_MANAGER; + UTIL.getMiniHBaseCluster().stopRegionServer(sn); cdl.await(); + Future future = UTIL.getAdmin().disableTableAsync(NAME); ProcedureExecutor procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); - UTIL.getMiniHBaseCluster().stopRegionServer(sn); - long pid = Procedure.NO_PROC_ID; - do { - Threads.sleep(1); - pid = getSCPPID(procExec); - } while (pid != Procedure.NO_PROC_ID); - final long scppid = pid; - UTIL.waitFor(60000, () -> procExec.isFinished(scppid)); - RESUME_GET_REGIONS_ON_SERVER.countDown(); - - long dtpProcId = - procExec.getProcedures().stream().filter(p -> p instanceof DisableTableProcedure) - .map(p -> (DisableTableProcedure) p).findAny().get().getProcId(); - UTIL.waitFor(60000, () -> procExec.isFinished(dtpProcId)); - } - - /** Returns Returns {@link Procedure#NO_PROC_ID} if no SCP found else actual pid. */ - private long getSCPPID(ProcedureExecutor e) { - Optional optional = e.getProcedures().stream() - .filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p).findAny(); - return optional.isPresent() ? optional.get().getProcId() : Procedure.NO_PROC_ID; + // make sure the DTP has been executed + UTIL.waitFor(60000, + () -> procExec.getProcedures().stream().filter(p -> p instanceof DisableTableProcedure) + .map(p -> (DisableTableProcedure) p).filter(p -> p.getTableName().equals(NAME)) + .anyMatch(this::wasExecuted)); + RESUME_GET_REPLICATION_PEER_MANAGER.countDown(); + + // make sure the DTP can finish + future.get(); + + // also make sure all SCPs are finished + UTIL.waitFor(60000, () -> procExec.getProcedures().stream() + .filter(p -> p instanceof ServerCrashProcedure).allMatch(Procedure::isFinished)); } } From 8c2e5f32870ecd05a1d8019b8985573094f45650 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 28 Jul 2024 13:28:44 +0800 Subject: [PATCH 485/514] HBASE-28748 Replication blocking: InvalidProtocolBufferException: Protocol message tag had invalid wire type (#6115) Signed-off-by: Xin Sun --- .../wal/ProtobufWALTailingReader.java | 5 +- .../regionserver/WALEntryStream.java | 18 ++++--- .../regionserver/TestBasicWALEntryStream.java | 52 +++++++++++++++++-- 3 files changed, 60 insertions(+), 15 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufWALTailingReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufWALTailingReader.java index 62091acdd1db..6cf141d7053e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufWALTailingReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufWALTailingReader.java @@ -46,7 +46,7 @@ public class ProtobufWALTailingReader extends AbstractProtobufWALReader implements WALTailingReader { - private static final Logger LOG = LoggerFactory.getLogger(ProtobufWALStreamReader.class); + private static final Logger LOG = LoggerFactory.getLogger(ProtobufWALTailingReader.class); private DelegatingInputStream delegatingInput; @@ -117,8 +117,7 @@ private ReadWALKeyResult readWALKey(long originalPosition) { return KEY_ERROR_AND_RESET; } if (available > 0 && available < 
size) { - LOG.info( - "Available stream not enough for edit, available={}, " + "entry size={} at offset={}", + LOG.info("Available stream not enough for edit, available={}, entry size={} at offset={}", available, size, getPositionQuietly()); return KEY_EOF_AND_RESET; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 8d74d0e0399e..b286f01b8c9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -204,6 +204,15 @@ private void setCurrentPath(Path path) { this.currentPath = path; } + private void resetReader() throws IOException { + if (currentPositionOfEntry > 0) { + reader.resetTo(currentPositionOfEntry, state.resetCompression()); + } else { + // we will read from the beginning so we should always clear the compression context + reader.resetTo(-1, true); + } + } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION", justification = "HDFS-4380") private HasNext prepareReader() { @@ -213,12 +222,7 @@ private HasNext prepareReader() { LOG.debug("Reset reader {} to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression()); try { - if (currentPositionOfEntry > 0) { - reader.resetTo(currentPositionOfEntry, state.resetCompression()); - } else { - // we will read from the beginning so we should always clear the compression context - reader.resetTo(-1, true); - } + resetReader(); return HasNext.YES; } catch (IOException e) { LOG.warn("Failed to reset reader {} to pos {}, reset compression={}", currentPath, @@ -289,7 +293,7 @@ private HasNext lastAttempt() { LOG.debug("Reset reader {} for the last time to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression()); try { - reader.resetTo(currentPositionOfEntry, state.resetCompression()); + resetReader(); } catch (IOException e) { LOG.warn("Failed to reset reader {} to pos {}, reset compression={}", currentPath, currentPositionOfEntry, state.resetCompression(), e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java index 8cc8103c5a33..991aa2db4d3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java @@ -29,6 +29,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -45,7 +46,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -56,6 +59,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import 
org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufWALReader; import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream.HasNext; @@ -63,7 +67,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; @@ -76,6 +79,7 @@ import org.mockito.Mockito; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; public abstract class TestBasicWALEntryStream extends WALEntryStreamTestBase { @@ -93,7 +97,7 @@ public void setUp() throws Exception { initWAL(); } - private Entry next(WALEntryStream entryStream) { + private WAL.Entry next(WALEntryStream entryStream) { assertEquals(HasNext.YES, entryStream.hasNext()); return entryStream.next(); } @@ -562,7 +566,7 @@ private WALEntryFilter getDummyFilter() { return new WALEntryFilter() { @Override - public Entry filter(Entry entry) { + public WAL.Entry filter(WAL.Entry entry) { return entry; } }; @@ -581,7 +585,7 @@ public FailingWALEntryFilter(int numFailuresInFilter) { } @Override - public Entry filter(Entry entry) { + public WAL.Entry filter(WAL.Entry entry) { if (countFailures == numFailures) { return entry; } @@ -839,6 +843,44 @@ public void testReplicationSourceWALReaderWithPartialWALEntryFailingFilter() thr assertNull(reader.poll(10)); } + // testcase for HBASE-28748 + @Test + public void testWALEntryStreamEOFRightAfterHeader() throws Exception { + assertEquals(1, logQueue.getQueueSize(fakeWalGroupId)); + AbstractFSWAL abstractWAL = (AbstractFSWAL) log; + Path emptyLogFile = abstractWAL.getCurrentFileName(); + log.rollWriter(true); + + // AsyncFSWAl and FSHLog both moves the log from WALs to oldWALs directory asynchronously. + // Wait for in flight wal close count to become 0. This makes sure that empty wal is moved to + // oldWALs directory. + Waiter.waitFor(CONF, 5000, + (Waiter.Predicate) () -> abstractWAL.getInflightWALCloseCount() == 0); + // There will 2 logs in the queue. 
+ assertEquals(2, logQueue.getQueueSize(fakeWalGroupId)); + appendToLogAndSync(); + + Path archivedEmptyLogFile = AbstractFSWALProvider.findArchivedLog(emptyLogFile, CONF); + + // read the wal header + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + bos.write(AbstractProtobufWALReader.PB_WAL_MAGIC); + try (FSDataInputStream in = fs.open(archivedEmptyLogFile)) { + IOUtils.skipFully(in, AbstractProtobufWALReader.PB_WAL_MAGIC.length); + WALHeader header = WALHeader.parseDelimitedFrom(in); + header.writeDelimitedTo(bos); + } + // truncate the first empty log so we have an incomplete header + try (FSDataOutputStream out = fs.create(archivedEmptyLogFile, true)) { + bos.writeTo(out); + } + try (WALEntryStream entryStream = + new WALEntryStream(logQueue, fs, CONF, 0, log, new MetricsSource("1"), fakeWalGroupId)) { + assertEquals(HasNext.RETRY_IMMEDIATELY, entryStream.hasNext()); + assertNotNull(next(entryStream)); + } + } + private static class PartialWALEntryFailingWALEntryFilter implements WALEntryFilter { private int filteredWALEntryCount = -1; private int walEntryCount = 0; @@ -851,7 +893,7 @@ public PartialWALEntryFailingWALEntryFilter(int throwExceptionLimit, int walEntr } @Override - public Entry filter(Entry entry) { + public WAL.Entry filter(WAL.Entry entry) { filteredWALEntryCount++; if (filteredWALEntryCount < walEntryCount - 1) { return entry; From 836f2d9a5b3ae013ecd84efb457bf7ff20cdd997 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 28 Jul 2024 13:51:18 +0800 Subject: [PATCH 486/514] HBASE-28719 Use ExtendedCell in WALEdit (#6108) Signed-off-by: Xin Sun --- ...ccessor.java => ClientInternalHelper.java} | 2 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 6 +- .../apache/hadoop/hbase/mapreduce/Import.java | 8 +-- .../hadoop/hbase/mapreduce/PutCombiner.java | 6 +- .../hbase/mapreduce/PutSortReducer.java | 5 +- .../hbase/mapreduce/TestImportExport.java | 7 +-- .../hbase/mapreduce/TestWALRecordReader.java | 29 +++++---- .../hadoop/hbase/regionserver/HRegion.java | 34 ++++++----- .../hbase/regionserver/RSRpcServices.java | 14 ++--- .../hbase/regionserver/RegionScannerImpl.java | 4 +- .../hbase/regionserver/wal/FSWALEntry.java | 7 ++- .../ReplicationSourceWALReader.java | 6 +- .../VisibilityReplicationEndpoint.java | 18 +++--- .../org/apache/hadoop/hbase/wal/WALEdit.java | 59 +++++++++++++++---- .../hbase/wal/WALEditInternalHelper.java | 56 ++++++++++++++++++ .../apache/hadoop/hbase/wal/WALSplitUtil.java | 10 ++-- .../hbase/coprocessor/TestWALObserver.java | 4 +- .../hadoop/hbase/master/AbstractTestDLS.java | 3 +- .../hbase/regionserver/TestHRegion.java | 9 +-- .../TestRecoveredEditsReplayAndAbort.java | 4 +- .../regionserver/wal/AbstractTestFSWAL.java | 11 ++-- .../wal/AbstractTestWALReplay.java | 10 +++- .../wal/ProtobufLogTestHelper.java | 4 +- .../regionserver/wal/TestAsyncFSWAL.java | 4 +- .../wal/TestAsyncFSWALRollStuck.java | 10 ++-- .../regionserver/wal/TestFSWALEntry.java | 4 +- .../regionserver/wal/TestLogRollAbort.java | 4 +- .../wal/TestLogRollingNoCluster.java | 4 +- .../wal/TestWALActionsListener.java | 3 +- .../TestReplicationEmptyWALRecovery.java | 3 +- .../replication/TestReplicationEndpoint.java | 3 +- .../TestReplicationSmallTests.java | 4 +- .../TestReplicationWALEntryFilters.java | 3 +- .../master/TestRecoverStandbyProcedure.java | 4 +- .../regionserver/TestBasicWALEntryStream.java | 5 +- ...ClusterReplicationEndpointFilterEdits.java | 18 +++--- .../regionserver/TestReplicationSource.java | 12 ++-- .../TestReplicationSourceManager.java | 13 ++-- 
.../TestWALEntryStreamCompressionReset.java | 24 ++++---- .../regionserver/WALEntryStreamTestBase.java | 6 +- .../hbase/wal/CompressedWALTestBase.java | 6 +- .../hbase/wal/TestParsePartialWALFile.java | 6 +- .../hbase/wal/WALPerformanceEvaluation.java | 3 +- .../hadoop/hbase/thrift2/ThriftUtilities.java | 8 +-- 44 files changed, 305 insertions(+), 158 deletions(-) rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{PackagePrivateFieldAccessor.java => ClientInternalHelper.java} (98%) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEditInternalHelper.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientInternalHelper.java similarity index 98% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientInternalHelper.java index 08293ab83f81..cf3e344349aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientInternalHelper.java @@ -32,7 +32,7 @@ * TODO: A better solution is to separate the data structures used in client and server. */ @InterfaceAudience.Private -public class PackagePrivateFieldAccessor { +public class ClientInternalHelper { public static void setMvccReadPoint(Scan scan, long mvccReadPoint) { scan.setMvccReadPoint(mvccReadPoint); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d3672c5e841b..0dec61563494 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.client.BalancerDecision; import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.CheckAndMutate; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -89,7 +90,6 @@ import org.apache.hadoop.hbase.client.LogEntry; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.OnlineLogRecord; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLoadStats; @@ -1082,7 +1082,7 @@ public static ClientProtos.Scan toScan(final Scan scan) throws IOException { if (scan.getCaching() > 0) { scanBuilder.setCaching(scan.getCaching()); } - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); + long mvccReadPoint = ClientInternalHelper.getMvccReadPoint(scan); if (mvccReadPoint > 0) { scanBuilder.setMvccReadPoint(mvccReadPoint); } @@ -1192,7 +1192,7 @@ public static Scan toScan(final ClientProtos.Scan proto) throws IOException { scan.setCaching(proto.getCaching()); } if (proto.hasMvccReadPoint()) { - PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint()); + ClientInternalHelper.setMvccReadPoint(scan, proto.getMvccReadPoint()); } if (proto.hasReadType()) { 
scan.setReadType(toReadType(proto.getReadType())); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 8a8b846959b6..ee09a7dc3972 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -47,12 +47,12 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -205,7 +205,7 @@ public void map(ImmutableBytesWritable row, Result value, Context context) throw filter == null || !filter.filterRowKey( PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) ) { - for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(value)) { + for (ExtendedCell kv : ClientInternalHelper.getExtendedRawCells(value)) { kv = filterKv(filter, kv); // skip if we filtered it out if (kv == null) { @@ -271,7 +271,7 @@ public void map(ImmutableBytesWritable row, Result value, Context context) throw filter == null || !filter.filterRowKey( PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength())) ) { - for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(value)) { + for (ExtendedCell kv : ClientInternalHelper.getExtendedRawCells(value)) { kv = filterKv(filter, kv); // skip if we filtered it out if (kv == null) { @@ -336,7 +336,7 @@ private void writeResult(ImmutableBytesWritable key, Result result, Context cont protected void processKV(ImmutableBytesWritable key, Result result, Context context, Put put, Delete delete) throws IOException, InterruptedException { - for (ExtendedCell kv : PackagePrivateFieldAccessor.getExtendedRawCells(result)) { + for (ExtendedCell kv : ClientInternalHelper.getExtendedRawCells(result)) { kv = filterKv(filter, kv); // skip if we filter it out if (kv == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index cd25736bd6ee..4d79eca2a7e6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -22,7 +22,7 @@ import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; import org.apache.yetus.audience.InterfaceAudience; @@ -55,9 +55,9 @@ protected void reduce(K row, Iterable vals, Context context) cnt++; if (combinedPut == null) { combinedPut = p; - combinedFamilyMap = PackagePrivateFieldAccessor.getExtendedFamilyCellMap(combinedPut); + combinedFamilyMap = 
ClientInternalHelper.getExtendedFamilyCellMap(combinedPut); } else { - for (Entry> entry : PackagePrivateFieldAccessor + for (Entry> entry : ClientInternalHelper .getExtendedFamilyCellMap(p).entrySet()) { List existCells = combinedFamilyMap.get(entry.getKey()); if (existCells == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index c8f32c205fb7..4cf7bcd9ff22 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -100,8 +100,7 @@ protected void reduce(ImmutableBytesWritable row, Iterable puts, // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells : PackagePrivateFieldAccessor.getExtendedFamilyCellMap(p) - .values()) { + for (List cells : ClientInternalHelper.getExtendedFamilyCellMap(p).values()) { for (ExtendedCell cell : cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 140f871e4386..63201e857398 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -60,7 +61,6 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -366,7 +366,7 @@ public void testWithDeletes() throws Throwable { s.setRaw(true); ResultScanner scanner = t.getScanner(s); Result r = scanner.next(); - ExtendedCell[] res = PackagePrivateFieldAccessor.getExtendedRawCells(r); + ExtendedCell[] res = ClientInternalHelper.getExtendedRawCells(r); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); assertEquals(now + 4, res[1].getTimestamp()); assertEquals(now + 3, res[2].getTimestamp()); @@ -934,8 +934,7 @@ public void testTagsWithEmptyCodec() throws Exception { int count = 0; Result result; while ((result = scanner.next()) != null) { - List cells = - Arrays.asList(PackagePrivateFieldAccessor.getExtendedRawCells(result)); + List cells = 
Arrays.asList(ClientInternalHelper.getExtendedRawCells(result)); assertEquals(2, cells.size()); ExtendedCell cell = cells.get(0); assertTrue(CellUtil.isDelete(cell)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 795135cc6d19..3a457ee4d9c1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; @@ -143,10 +144,12 @@ public void testPartialRead() throws Exception { // being millisecond based. long ts = EnvironmentEdgeManager.currentTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); log.appendData(info, getWalKeyImpl(ts + 1, scopes), edit); log.sync(); Threads.sleep(10); @@ -158,10 +161,12 @@ public void testPartialRead() throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); log.appendData(info, getWalKeyImpl(ts1 + 1, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); log.appendData(info, getWalKeyImpl(ts1 + 2, scopes), edit); log.sync(); log.shutdown(); @@ -203,8 +208,8 @@ public void testWALRecordReader() throws Exception { WAL log = walfactory.getWAL(info); byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), - value)); + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(rowName, family, Bytes.toBytes("1"), + EnvironmentEdgeManager.currentTime(), value)); long txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); @@ -214,8 +219,8 @@ public void testWALRecordReader() throws Exception { log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), - value)); + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(rowName, family, Bytes.toBytes("2"), + EnvironmentEdgeManager.currentTime(), value)); txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); @@ -261,8 +266,8 @@ public void testWALRecordReaderActiveArchiveTolerance() throws Exception { WAL log = walfactory.getWAL(info); byte[] 
value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), - value)); + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(rowName, family, Bytes.toBytes("1"), + EnvironmentEdgeManager.currentTime(), value)); long txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); @@ -270,8 +275,8 @@ public void testWALRecordReaderActiveArchiveTolerance() throws Exception { Thread.sleep(10); // make sure 2nd edit gets a later timestamp edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), - value)); + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(rowName, family, Bytes.toBytes("2"), + EnvironmentEdgeManager.currentTime(), value)); txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 68f5356f5549..b74935ffafff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Delete; @@ -112,7 +113,6 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -181,6 +181,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; @@ -3513,7 +3514,7 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep // store the family map reference to allow for mutations // we know that in mutation, only ExtendedCells are allow so here we do a fake cast, to // simplify later logic - familyCellMaps[index] = PackagePrivateFieldAccessor.getExtendedFamilyCellMap(mutation); + familyCellMaps[index] = ClientInternalHelper.getExtendedFamilyCellMap(mutation); } // store durability for the batch (highest durability of all operations in the batch) @@ -3709,7 +3710,9 @@ public boolean visit(int index) throws IOException { // Add WAL edits from CPs. WALEdit fromCP = walEditsFromCoprocessors[index]; - List cellsFromCP = fromCP == null ? Collections.emptyList() : fromCP.getCells(); + List cellsFromCP = fromCP == null + ? 
Collections.emptyList() + : WALEditInternalHelper.getExtendedCells(fromCP); addNonSkipWALMutationsToWALEdit(miniBatchOp, walEdit, cellsFromCP, familyCellMaps[index]); return true; } @@ -3719,14 +3722,14 @@ public boolean visit(int index) throws IOException { protected void addNonSkipWALMutationsToWALEdit( final MiniBatchOperationInProgress miniBatchOp, WALEdit walEdit, - List cellsFromCP, Map> familyCellMap) { + List cellsFromCP, Map> familyCellMap) { doAddCellsToWALEdit(walEdit, cellsFromCP, familyCellMap); } - protected static void doAddCellsToWALEdit(WALEdit walEdit, List cellsFromCP, + protected static void doAddCellsToWALEdit(WALEdit walEdit, List cellsFromCP, Map> familyCellMap) { - walEdit.add(cellsFromCP); - walEdit.add((Map) familyCellMap); + WALEditInternalHelper.addExtendedCell(walEdit, cellsFromCP); + WALEditInternalHelper.addMap(walEdit, familyCellMap); } protected abstract void cacheSkipWALMutationForRegionReplication( @@ -4064,7 +4067,7 @@ private Map> reckonDeltas(Mutation mutation, assert mutation instanceof Increment || mutation instanceof Append; Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Process a Store/family at a time. - for (Map.Entry> entry : PackagePrivateFieldAccessor + for (Map.Entry> entry : ClientInternalHelper .getExtendedFamilyCellMap(mutation).entrySet()) { final byte[] columnFamilyName = entry.getKey(); List deltas = (List) entry.getValue(); @@ -4260,7 +4263,7 @@ protected void cacheSkipWALMutationForRegionReplication( this.createWALEditForReplicateSkipWAL(miniBatchOp, nonceKeyAndWALEdits); miniBatchOp.setWalEditForReplicateIfExistsSkipWAL(walEditForReplicateIfExistsSkipWAL); } - walEditForReplicateIfExistsSkipWAL.add((Map) familyCellMap); + WALEditInternalHelper.addMap(walEditForReplicateIfExistsSkipWAL, familyCellMap); } @@ -4279,8 +4282,7 @@ private WALEdit createWALEditForReplicateSkipWAL( @Override protected void addNonSkipWALMutationsToWALEdit( final MiniBatchOperationInProgress miniBatchOp, WALEdit walEdit, - List cellsFromCP, Map> familyCellMap) { - + List cellsFromCP, Map> familyCellMap) { super.addNonSkipWALMutationsToWALEdit(miniBatchOp, walEdit, cellsFromCP, familyCellMap); WALEdit walEditForReplicateIfExistsSkipWAL = miniBatchOp.getWalEditForReplicateIfExistsSkipWAL(); @@ -4524,7 +4526,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> cpFamilyMap = - PackagePrivateFieldAccessor.getExtendedFamilyCellMap(cpMutation); + ClientInternalHelper.getExtendedFamilyCellMap(cpMutation); region.rewriteCellTags(cpFamilyMap, mutation); // will get added to the memStore later mergeFamilyMaps(familyCellMaps[i], cpFamilyMap); @@ -5096,16 +5098,16 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { if (mutation instanceof Put) { - updateCellTimestamps( - PackagePrivateFieldAccessor.getExtendedFamilyCellMap(mutation).values(), byteTs); + updateCellTimestamps(ClientInternalHelper.getExtendedFamilyCellMap(mutation).values(), + byteTs); } // And else 'delete' is not needed since it already does a second get, and sets the // timestamp from get (see prepareDeleteTimestamps). 
} else { for (Mutation m : rowMutations.getMutations()) { if (m instanceof Put) { - updateCellTimestamps( - PackagePrivateFieldAccessor.getExtendedFamilyCellMap(m).values(), byteTs); + updateCellTimestamps(ClientInternalHelper.getExtendedFamilyCellMap(m).values(), + byteTs); } } // And else 'delete' is not needed since it already does a second get, and sets the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 5e16b08369b1..ce9cab6bf3b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -67,13 +67,13 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.OperationWithAttributes; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2078,7 +2078,7 @@ private ExtendedCellScanner getAndReset(RpcController controller) { public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException { long before = EnvironmentEdgeManager.currentTime(); - CellScanner cells = getAndReset(controller); + ExtendedCellScanner cells = getAndReset(controller); try { checkOpen(); List entries = request.getEntryList(); @@ -2500,8 +2500,8 @@ public GetResponse get(final RpcController controller, final GetRequest request) && VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 3) ) { pbr = ProtobufUtil.toResultNoData(r); - ((HBaseRpcController) controller).setCellScanner(PrivateCellUtil - .createExtendedCellScanner(PackagePrivateFieldAccessor.getExtendedRawCells(r))); + ((HBaseRpcController) controller).setCellScanner( + PrivateCellUtil.createExtendedCellScanner(ClientInternalHelper.getExtendedRawCells(r))); addSize(context, r); } else { pbr = ProtobufUtil.toResult(r); @@ -3426,10 +3426,8 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan int lastIdx = results.size() - 1; Result r = results.get(lastIdx); if (r.mayHaveMoreCellsInRow()) { - results.set(lastIdx, - PackagePrivateFieldAccessor.createResult( - PackagePrivateFieldAccessor.getExtendedRawCells(r), r.getExists(), r.isStale(), - false)); + results.set(lastIdx, ClientInternalHelper.createResult( + ClientInternalHelper.getExtendedRawCells(r), r.getExists(), r.isStale(), false)); } } boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index a5fc2947bee0..81b5f6a6d70c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -34,8 
+34,8 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.IsolationLevel; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FilterWrapper; @@ -128,7 +128,7 @@ private static boolean hasNonce(HRegion region, long nonce) { // synchronize on scannerReadPoints so that nobody calculates // getSmallestReadPoint, before scannerReadPoints is updated. IsolationLevel isolationLevel = scan.getIsolationLevel(); - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); + long mvccReadPoint = ClientInternalHelper.getMvccReadPoint(scan); this.scannerReadPoints = region.scannerReadPoints; this.rsServices = region.getRegionServerServices(); region.smallestReadPointCalcLock.lock(ReadPointCalculationLock.LockType.RECORDING_LOCK); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 502e04e05b06..8fa83ba08bf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -24,6 +24,7 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ipc.ServerCall; @@ -31,6 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.yetus.audience.InterfaceAudience; @@ -80,7 +82,8 @@ class FSWALEntry extends Entry { if (inMemstore) { // construct familyNames here to reduce the work of log sinker. Set families = edit.getFamilies(); - this.familyNames = families != null ? families : collectFamilies(edit.getCells()); + this.familyNames = + families != null ? 
families : collectFamilies(WALEditInternalHelper.getExtendedCells(edit)); } else { this.familyNames = Collections.emptySet(); } @@ -90,7 +93,7 @@ class FSWALEntry extends Entry { } } - static Set collectFamilies(List cells) { + static Set collectFamilies(List cells) { if (CollectionUtils.isEmpty(cells)) { return Collections.emptySet(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index bd5b7736f3b9..fe983c9f3ae6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -28,12 +28,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; @@ -413,10 +415,10 @@ private void updateReplicationMarkerEdit(Entry entry, long offset) { // Create a new KeyValue KeyValue kv = new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), descriptor.toByteArray()); - ArrayList newCells = new ArrayList<>(); + ArrayList newCells = new ArrayList<>(); newCells.add(kv); // Update edit with new cell. 
- edit.setCells(newCells); + WALEditInternalHelper.setExtendedCells(edit, newCells); } /** Returns whether the reader thread is running */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index 1b91ed718f61..b97a08c01c38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -24,7 +24,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; @@ -34,6 +33,7 @@ import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,10 +75,8 @@ public boolean replicate(ReplicateContext replicateContext) { List newEntries = new ArrayList<>(entries.size()); for (Entry entry : entries) { WALEdit newEdit = new WALEdit(); - ArrayList cells = entry.getEdit().getCells(); - for (Cell c : cells) { - assert c instanceof ExtendedCell; - ExtendedCell cell = (ExtendedCell) c; + List cells = WALEditInternalHelper.getExtendedCells(entry.getEdit()); + for (ExtendedCell cell : cells) { if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); @@ -99,17 +97,17 @@ public boolean replicate(ReplicateContext replicateContext) { + "string type for the cell " + cell + ".", ioe); // just return the old entries as it is without applying the string type change - newEdit.add(cell); + WALEditInternalHelper.addExtendedCell(newEdit, cell); continue; } // Recreate the cell with the new tags and the existing tags - Cell newCell = PrivateCellUtil.createCell(cell, nonVisTags); - newEdit.add(newCell); + ExtendedCell newCell = PrivateCellUtil.createCell(cell, nonVisTags); + WALEditInternalHelper.addExtendedCell(newEdit, newCell); } else { - newEdit.add(cell); + WALEditInternalHelper.addExtendedCell(newEdit, cell); } } else { - newEdit.add(cell); + WALEditInternalHelper.addExtendedCell(newEdit, cell); } } newEntries.add(new Entry((entry.getKey()), newEdit)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 01dbe06682bb..512e21803b31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -25,6 +25,7 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -145,7 +146,7 @@ public class WALEdit implements HeapSize { private final transient boolean replay; - private ArrayList cells; + private ArrayList cells; /** * All the Cell families in cells. 
Updated by {@link #add(Cell)} and @@ -249,21 +250,29 @@ public boolean isReplay() { } public WALEdit add(Cell cell, byte[] family) { + return add(PrivateCellUtil.ensureExtendedCell(cell), family); + } + + WALEdit add(ExtendedCell cell, byte[] family) { getOrCreateFamilies().add(family); return addCell(cell); } public WALEdit add(Cell cell) { + return add(PrivateCellUtil.ensureExtendedCell(cell)); + } + + WALEdit add(ExtendedCell cell) { // We clone Family each time we add a Cell. Expensive but safe. For CPU savings, use // add(Map) or add(Cell, family). return add(cell, CellUtil.cloneFamily(cell)); } - public WALEdit add(List cells) { + WALEdit add(List cells) { if (cells == null || cells.isEmpty()) { return this; } - for (Cell cell : cells) { + for (ExtendedCell cell : cells) { add(cell); } return this; @@ -278,16 +287,29 @@ public int size() { } public ArrayList getCells() { + return (ArrayList) cells; + } + + List getExtendedCells() { return cells; } + /** + * This is just for keeping compatibility for CPs, in HBase you should call the below + * {@link #setExtendedCells(ArrayList)} directly to avoid casting. + */ + void setCells(ArrayList cells) { + this.cells = new ArrayList<>((ArrayList) cells); + this.families = null; + } + /** * This is not thread safe. This will change the WALEdit and shouldn't be used unless you are sure * that nothing else depends on the contents being immutable. * @param cells the list of cells that this WALEdit now contains. */ // Used by replay. - public void setCells(ArrayList cells) { + void setExtendedCells(ArrayList cells) { this.cells = cells; this.families = null; } @@ -458,14 +480,31 @@ public static WALProtos.BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) thro } /** - * Append the given map of family->edits to a WALEdit data structure. This does not write to the - * WAL itself. Note that as an optimization, we will stamp the Set of column families into the - * WALEdit to save on our having to calculate column families subsequently down in the actual WAL - * writing. - * @param familyMap map of family->edits + * This is just for keeping compatibility for CPs, in HBase you should call the below + * {@link #addMap(Map)} directly to avoid casting. */ public void add(Map> familyMap) { for (Map.Entry> e : familyMap.entrySet()) { + // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." + int listSize = e.getValue().size(); + // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} + // and have it clone family each time. Optimization! + for (int i = 0; i < listSize; i++) { + addCell(PrivateCellUtil.ensureExtendedCell(e.getValue().get(i))); + } + addFamily(e.getKey()); + } + } + + /** + * Append the given map of family-> edits to a WALEdit data structure. This does not write to + * the WAL itself. Note that as an optimization, we will stamp the Set of column families into the + * WALEdit to save on our having to calculate column families subsequently down in the actual WAL + * writing. + * @param familyMap map of family -> edits + */ + void addMap(Map> familyMap) { + for (Map.Entry> e : familyMap.entrySet()) { // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." 
int listSize = e.getValue().size(); // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} @@ -481,7 +520,7 @@ private void addFamily(byte[] family) { getOrCreateFamilies().add(family); } - private WALEdit addCell(Cell cell) { + private WALEdit addCell(ExtendedCell cell) { this.cells.add(cell); return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEditInternalHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEditInternalHelper.java new file mode 100644 index 000000000000..0aba676f6dcf --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEditInternalHelper.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.wal; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A helper class so we can call some package private methods of {@link WALEdit} from other + * packages. Since {@link WALEdit} has been exposed to coprocessor and replication implementations, + * we do not want to make all the methods in it public. 
+ */ +@InterfaceAudience.Private +public final class WALEditInternalHelper { + + private WALEditInternalHelper() { + } + + public static WALEdit addExtendedCell(WALEdit edit, ExtendedCell cell) { + return edit.add(cell); + } + + public static void addExtendedCell(WALEdit edit, List cells) { + edit.add(cells); + } + + public static void addMap(WALEdit edit, Map> familyMap) { + edit.addMap(familyMap); + } + + public static void setExtendedCells(WALEdit edit, ArrayList cells) { + edit.setExtendedCells(cells); + } + + public static List getExtendedCells(WALEdit edit) { + return edit.getExtendedCells(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index cbfde9c7e172..fab9936165d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -36,8 +36,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -477,7 +478,8 @@ public ClientProtos.MutationProto.MutationType getType() { */ @Deprecated public static List getMutationsFromWALEntry(AdminProtos.WALEntry entry, - CellScanner cells, Pair logEntry, Durability durability) throws IOException { + ExtendedCellScanner cells, Pair logEntry, Durability durability) + throws IOException { if (entry == null) { // return an empty array return Collections.emptyList(); @@ -501,9 +503,9 @@ public static List getMutationsFromWALEntry(AdminProtos.WALEntry if (!cells.advance()) { throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i); } - Cell cell = cells.current(); + ExtendedCell cell = cells.current(); if (val != null) { - val.add(cell); + WALEditInternalHelper.addExtendedCell(val, cell); } boolean isNewRowOrType = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 105c57b55ea0..989110e41d97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; @@ -444,7 +445,8 @@ private void addWALEdits(final TableName tableName, final RegionInfo hri, final byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j)); byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j)); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes)); // uses WALKeyImpl instead of HLogKey on purpose. 
will only work for tests where we don't care // about legacy coprocessors txid = wal.appendData(hri, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index 00f6f2005806..419db220e17f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.junit.After; @@ -434,7 +435,7 @@ public void makeWAL(HRegionServer hrs, List regions, int numEdits, i row = Arrays.copyOfRange(row, 3, 8); // use last 5 bytes because // HBaseTestingUtility.createMultiRegions use 5 bytes key byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i)); - e.add( + WALEditInternalHelper.addExtendedCell(e, new KeyValue(row, COLUMN_FAMILY, qualifier, EnvironmentEdgeManager.currentTime(), value)); log.appendData(curRegionInfo, new WALKeyImpl(curRegionInfo.getEncodedNameAsBytes(), tableName, EnvironmentEdgeManager.currentTime(), mvcc), e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index d9856b40a831..99c811e720c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -171,6 +171,7 @@ import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -699,7 +700,7 @@ public void testArchiveRecoveredEditsReplay() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add( + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); writer.append(new WAL.Entry( new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); @@ -753,7 +754,7 @@ public void testSkipRecoveredEditsReplay() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add( + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); writer.append(new WAL.Entry( new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); @@ -804,7 +805,7 @@ public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add( + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); writer.append(new WAL.Entry( new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); @@ -899,7 +900,7 @@ public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception { .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())).build()); } else 
{ edit = new WALEdit(); - edit.add( + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); } writer.append(new WAL.Entry( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java index c54307c66fc5..6b372fa99350 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -147,7 +148,8 @@ public void test() throws Exception { // 200KB kv byte[] value = new byte[200 * 1024]; Bytes.random(value); - edit.add(new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, value)); writer.append(new WAL.Entry( new WALKeyImpl(regionName, tableName, j, time, HConstants.DEFAULT_CLUSTER_ID), edit)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index fd80f6cceae4..e8a364cd54ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -50,8 +50,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.AfterClass; @@ -183,7 +184,7 @@ protected void addEdits(WAL log, RegionInfo hri, TableDescriptor htd, int times, for (int i = 0; i < times; i++) { long timestamp = EnvironmentEdgeManager.currentTime(); WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, row, row, timestamp, row)); + WALEditInternalHelper.addExtendedCell(cols, new KeyValue(row, row, row, timestamp, row)); WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(), SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes); @@ -459,9 +460,9 @@ public void run() { // Construct a WALEdit and add it a few times to the WAL. 
WALEdit edits = new WALEdit(); for (Put p : puts) { - CellScanner cs = p.cellScanner(); + ExtendedCellScanner cs = p.cellScanner(); while (cs.advance()) { - edits.add(cs.current()); + WALEditInternalHelper.addExtendedCell(edits, cs.current()); } } // Add any old cluster id. @@ -517,7 +518,7 @@ public void testWriteEntryCanBeNull() throws IOException { long timestamp = EnvironmentEdgeManager.currentTime(); byte[] row = Bytes.toBytes("row"); WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, row, row, timestamp, row)); + WALEditInternalHelper.addExtendedCell(cols, new KeyValue(row, row, row, timestamp, row)); WALKeyImpl key = new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(), SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 947c14e716f3..18b560519bb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -99,6 +99,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitUtil; @@ -757,14 +758,16 @@ public void testReplayEditsWrittenIntoWAL() throws Exception { // Add an edit to another family, should be skipped. WALEdit edit = new WALEdit(); long now = ee.currentTime(); - edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName)); wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit); // Delete the c family to verify deletes make it over. 
edit = new WALEdit(); now = ee.currentTime(); - edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily)); wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit); @@ -1103,7 +1106,8 @@ private WALEdit createWALEdit(final byte[] rowName, final byte[] family, Environ byte[] qualifierBytes = Bytes.toBytes(Integer.toString(index)); byte[] columnBytes = Bytes.toBytes(Bytes.toString(family) + ":" + Integer.toString(index)); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes)); return edit; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java index becb0f1794dd..a70f6242432a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -64,7 +65,8 @@ private static WAL.Entry generateEdit(int i, RegionInfo hri, TableName tableName WALEdit edit = new WALEdit(); int prefix = i; IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j)) - .map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add); + .map(value -> new KeyValue(row, row, row, timestamp, value)) + .forEachOrdered(c -> WALEditInternalHelper.addExtendedCell(edit, c)); return new WAL.Entry(key, edit); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java index dc075ff8f966..b4e5d3402ae2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; @@ -202,7 +203,8 @@ public void append(Entry entry) { public void run() { byte[] row = Bytes.toBytes("row" + index); WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, row, row, timestamp + index, row)); + WALEditInternalHelper.addExtendedCell(cols, + new KeyValue(row, row, row, timestamp + index, row)); WALKeyImpl key = new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(), SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java index 510814ed1279..931362832ed0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java @@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell.Type; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -181,9 +182,10 @@ public static void tearDown() throws Exception { public void testRoll() throws Exception { byte[] row = Bytes.toBytes("family"); WALEdit edit = new WALEdit(); - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setFamily(row) - .setQualifier(row).setRow(row).setValue(row) - .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(Type.Put).build()); + WALEditInternalHelper.addExtendedCell(edit, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setFamily(row) + .setQualifier(row).setRow(row).setValue(row) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(Type.Put).build()); WALKeyImpl key1 = new WALKeyImpl(RI.getEncodedNameAsBytes(), TN, EnvironmentEdgeManager.currentTime(), MVCC); WAL.appendData(RI, key1, edit); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java index 4d0687829ac0..1a0ecdfc55d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java @@ -21,8 +21,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -48,7 +48,7 @@ public void testCollectFamilies() { byte[] family1 = Bytes.toBytes("family1"); byte[] family2 = Bytes.toBytes("family2"); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); assertEquals(0, FSWALEntry.collectFamilies(cells).size()); cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family0) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 90f595003cb1..3c3dbe1ead9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; 
import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -225,7 +226,8 @@ public void testLogRollAfterSplitStart() throws IOException { int total = 20; for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); + WALEditInternalHelper.addExtendedCell(kvs, + new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(Bytes.toBytes("column"), 0); log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 606ee55c3159..dd4fe77c8a38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.wal.FSHLogProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.ClassRule; @@ -167,7 +168,8 @@ public void run() { } WALEdit edit = new WALEdit(); byte[] bytes = Bytes.toBytes(i); - edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : this.metaTableDescriptor.getColumnFamilyNames()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java index 0ad2e8d0522c..61dd8def4ea6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.After; @@ -107,7 +108,7 @@ public void testActionListener() throws Exception { byte[] b = Bytes.toBytes(i + ""); KeyValue kv = new KeyValue(b, b, b); WALEdit edit = new WALEdit(); - edit.add(kv); + WALEditInternalHelper.addExtendedCell(edit, kv); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(b, 0); long txid = wal.appendData(hri, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java index 67546febab72..38fc6599dad0 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.Assert; import org.junit.Before; @@ -361,7 +362,7 @@ private void appendEntriesToWal(int numEntries, WAL wal) throws IOException { byte[] b = Bytes.toBytes(Integer.toString(i)); KeyValue kv = new KeyValue(b, famName, b); WALEdit edit = new WALEdit(); - edit.add(kv); + WALEditInternalHelper.addExtendedCell(edit, kv); txId = wal.appendData(info, getWalKeyImpl(), edit); } wal.sync(txId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 9bc632e223be..057a9f3567f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -413,7 +414,7 @@ private Entry createEntry(String tableName, TreeMap scopes, byt WALEdit edit1 = new WALEdit(); for (byte[] kv : kvs) { - edit1.add(new KeyValue(kv, kv, kv)); + WALEditInternalHelper.addExtendedCell(edit1, new KeyValue(kv, kv, kv)); } return new Entry(key1, edit1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index aae2af10264b..06fdc47fa3ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.Before; import org.junit.ClassRule; @@ -423,7 +424,8 @@ public void testReplicationInReplay() throws Exception { final byte[] value = Bytes.toBytes("v"); WALEdit edit = new WALEdit(true); long now = EnvironmentEdgeManager.currentTime(); - edit.add(new KeyValue(rowName, famName, qualifier, now, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(rowName, famName, qualifier, now, value)); WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes); wal.appendData(hri, walKey, edit); wal.sync(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index cd18443d80dd..fb4729d87f83 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.Assert; import org.junit.ClassRule; @@ -489,7 +490,7 @@ private Entry createEntry(TreeMap scopes, byte[]... kvs) { WALEdit edit1 = new WALEdit(); for (byte[] kv : kvs) { - edit1.add(new KeyValue(kv, kv, kv)); + WALEditInternalHelper.addExtendedCell(edit1, new KeyValue(kv, kv, kv)); } return new Entry(key1, edit1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java index 63f4f6ca0dbc..1b7b6c817495 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.After; import org.junit.AfterClass; @@ -183,7 +184,8 @@ private List setupWALEntries(int startRow, int endRow) { private Entry createWALEntry(byte[] row, byte[] value) { WALKeyImpl key = new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, 1); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, qualifier, timestamp, value)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(row, family, qualifier, timestamp, value)); return new Entry(key, edit); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java index 991aa2db4d3d..ac21c6619f5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -557,7 +558,7 @@ private void appendEntriesToLogAndSync(int count) throws IOException { private WALEdit getWALEdit(String row) { WALEdit edit = new WALEdit(); - edit.add(new KeyValue(Bytes.toBytes(row), family, qualifier, + WALEditInternalHelper.addExtendedCell(edit, new KeyValue(Bytes.toBytes(row), family, qualifier, EnvironmentEdgeManager.currentTime(), qualifier)); return edit; } @@ -700,7 +701,7 @@ private void appendEntries(WALProvider.Writer writer, int numEntries) throws IOE byte[] b = Bytes.toBytes(Integer.toString(i)); KeyValue kv = new KeyValue(b, b, b); WALEdit edit = new WALEdit(); - edit.add(kv); + 
WALEditInternalHelper.addExtendedCell(edit, kv); WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID); NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(b, HConstants.REPLICATION_SCOPE_GLOBAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java index cdef7de2076b..7b108f5ca148 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; @@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -98,16 +100,16 @@ public static void tearDownAfterClass() throws Exception { public void testFilterNotExistColumnFamilyEdits() { List> entryList = new ArrayList<>(); // should be filtered - Cell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER, + ExtendedCell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE); Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()), - new WALEdit().add(c1)); + WALEditInternalHelper.addExtendedCell(new WALEdit(), c1)); entryList.add(Lists.newArrayList(e1)); // should be kept - Cell c2 = + ExtendedCell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE); Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()), - new WALEdit().add(c2)); + WALEditInternalHelper.addExtendedCell(new WALEdit(), c2)); entryList.add(Lists.newArrayList(e2, e1)); List> filtered = endpoint.filterNotExistColumnFamilyEdits(entryList); assertEquals(1, filtered.size()); @@ -120,16 +122,16 @@ public void testFilterNotExistColumnFamilyEdits() { public void testFilterNotExistTableEdits() { List> entryList = new ArrayList<>(); // should be filtered - Cell c1 = + ExtendedCell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE); Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2, EnvironmentEdgeManager.currentTime()), - new WALEdit().add(c1)); + WALEditInternalHelper.addExtendedCell(new WALEdit(), c1)); entryList.add(Lists.newArrayList(e1)); // should be kept - Cell c2 = + ExtendedCell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE); Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()), - new WALEdit().add(c2)); + WALEditInternalHelper.addExtendedCell(new WALEdit(), c2)); entryList.add(Lists.newArrayList(e2)); List> filtered = endpoint.filterNotExistTableEdits(entryList); assertEquals(1, 
filtered.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 05b268d3a0a9..37af52eb93b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -37,9 +37,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -186,9 +187,10 @@ public void testWALEntryFilter() throws IOException { TEST_UTIL.waitFor(30000, () -> rs.getWalEntryFilter() != null); WALEntryFilter wef = rs.getWalEntryFilter(); // Test non-system WAL edit. - WALEdit we = new WALEdit() - .add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(HConstants.EMPTY_START_ROW) - .setFamily(HConstants.CATALOG_FAMILY).setType(Cell.Type.Put).build()); + WALEdit we = WALEditInternalHelper.addExtendedCell(new WALEdit(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + .setRow(HConstants.EMPTY_START_ROW).setFamily(HConstants.CATALOG_FAMILY) + .setType(Cell.Type.Put).build()); WAL.Entry e = new WAL.Entry( new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.valueOf("test"), -1, -1, uuid), we); assertTrue(wef.filter(e) == e); @@ -222,7 +224,7 @@ public void testLogMoving() throws Exception { byte[] b = Bytes.toBytes(Integer.toString(i)); KeyValue kv = new KeyValue(b, b, b); WALEdit edit = new WALEdit(); - edit.add(kv); + WALEditInternalHelper.addExtendedCell(edit, kv); WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID); writer.append(new WAL.Entry(key, edit)); writer.sync(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index ffeb22d01bcc..663b444dc4e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -36,9 +36,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.HConstants; @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.hamcrest.Matchers; @@ -238,10 +239,12 @@ private void createWALFile(Path file) throws Exception { WALKeyImpl key = new WALKeyImpl(RI.getEncodedNameAsBytes(), TABLE_NAME, EnvironmentEdgeManager.currentTime(), SCOPES); WALEdit edit = new WALEdit(); - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(F1).setFamily(F1) - .setQualifier(F1).setType(Cell.Type.Put).setValue(F1).build()); - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(F2).setFamily(F2) - .setQualifier(F2).setType(Cell.Type.Put).setValue(F2).build()); + WALEditInternalHelper.addExtendedCell(edit, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(F1).setFamily(F1) + .setQualifier(F1).setType(Cell.Type.Put).setValue(F1).build()); + WALEditInternalHelper.addExtendedCell(edit, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(F2).setFamily(F2) + .setQualifier(F2).setType(Cell.Type.Put).setValue(F2).build()); writer.append(new WAL.Entry(key, edit)); writer.sync(false); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamCompressionReset.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamCompressionReset.java index 628ddcafeb08..aa30027b3c98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamCompressionReset.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamCompressionReset.java @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -112,20 +113,23 @@ private static Pair generateWAL() throws Exception { writer.init(FS, path, UTIL.getConfiguration(), false, FS.getDefaultBlockSize(path), null); for (int i = 0; i < Byte.MAX_VALUE; i++) { WALEdit edit = new WALEdit(); - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) - .setRow(Bytes.toBytes(i)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier-" + i)) - .setValue(Bytes.toBytes("v-" + i)).build()); + WALEditInternalHelper.addExtendedCell(edit, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(Bytes.toBytes(i)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier-" + i)) + .setValue(Bytes.toBytes("v-" + i)).build()); writer.append(new WAL.Entry(new WALKeyImpl(REGION_INFO.getEncodedNameAsBytes(), TABLE_NAME, EnvironmentEdgeManager.currentTime(), MVCC, 
SCOPE), edit)); } WALEdit edit2 = new WALEdit(); - edit2.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) - .setRow(Bytes.toBytes(-1)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier")) - .setValue(Bytes.toBytes("vv")).build()); - edit2.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) - .setRow(Bytes.toBytes(-1)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier-1")) - .setValue(Bytes.toBytes("vvv")).build()); + WALEditInternalHelper.addExtendedCell(edit2, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(Bytes.toBytes(-1)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier")) + .setValue(Bytes.toBytes("vv")).build()); + WALEditInternalHelper.addExtendedCell(edit2, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(Bytes.toBytes(-1)).setFamily(FAMILY).setQualifier(Bytes.toBytes("qualifier-1")) + .setValue(Bytes.toBytes("vvv")).build()); writer.append(new WAL.Entry(new WALKeyImpl(REGION_INFO.getEncodedNameAsBytes(), TABLE_NAME, EnvironmentEdgeManager.currentTime(), MVCC, SCOPE), edit2)); writer.sync(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java index be1c66c815c8..3af5596b0054 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -177,8 +178,9 @@ protected long appendToLog(int count) throws IOException { protected WALEdit getWALEdits(int count) { WALEdit edit = new WALEdit(); for (int i = 0; i < count; i++) { - edit.add(new KeyValue(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), family, qualifier, - EnvironmentEdgeManager.currentTime(), qualifier)); + WALEditInternalHelper.addExtendedCell(edit, + new KeyValue(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), family, qualifier, + EnvironmentEdgeManager.currentTime(), qualifier)); } return edit; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java index 2ff9223e74b3..93714111d8da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java @@ -28,8 +28,8 @@ import java.util.TreeMap; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -111,9 +111,9 @@ public void doTest(TableName tableName, int valueSize) throws Exception { for (int i = 0; i < total; i++) { WALEdit kvs = new 
WALEdit(); - kvs.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + kvs.add(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) .setRow(row).setFamily(family).setQualifier(Bytes.toBytes(i)).setValue(value).build()); - kvs.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + kvs.add(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) .setType(Cell.Type.DeleteFamily).setRow(row).setFamily(family).build()); wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), kvs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestParsePartialWALFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestParsePartialWALFile.java index 108bcd8f8b4e..dc267058183e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestParsePartialWALFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestParsePartialWALFile.java @@ -31,9 +31,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -157,10 +157,10 @@ public void testPartialParse() throws Exception { EnvironmentEdgeManager.currentTime(), HConstants.DEFAULT_CLUSTER_ID); WALEdit edit = new WALEdit(); if (i % 2 == 0) { - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) + edit.add(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) .setRow(ROW).setFamily(FAMILY).setQualifier(QUAL).setValue(VALUE).build()); } else { - edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + edit.add(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) .setType(Type.DeleteFamily).setRow(ROW).setFamily(FAMILY).build()); } writer.append(new WAL.Entry(key, edit)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index c462e2e5c621..97ad666c1b18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MockRegionServerServices; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; @@ -152,7 +153,7 @@ public void run() { long now = System.nanoTime(); Put put = setupPut(ThreadLocalRandom.current(), key, value, numFamilies); WALEdit walEdit = new WALEdit(); - walEdit.add(put.getFamilyCellMap()); + walEdit.addMap(ClientInternalHelper.getExtendedFamilyCellMap(put)); RegionInfo hri = region.getRegionInfo(); final WALKeyImpl logkey = new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), now, mvcc, scopes); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index ee060bd53878..c802883c9391 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Consistency; @@ -56,7 +57,6 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.OnlineLogRecord; import org.apache.hadoop.hbase.client.OperationWithAttributes; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -222,7 +222,7 @@ public static List getsFromThrift(List in) throws IOException { * @return converted result, returns an empty result if the input is null */ public static TResult resultFromHBase(Result in) { - ExtendedCell[] raw = PackagePrivateFieldAccessor.getExtendedRawCells(in); + ExtendedCell[] raw = ClientInternalHelper.getExtendedRawCells(in); TResult out = new TResult(); byte[] row = in.getRow(); if (row != null) { @@ -1311,7 +1311,7 @@ public static TPut putFromHBase(Put in) { if (in.getDurability() != Durability.USE_DEFAULT) { out.setDurability(durabilityFromHBase(in.getDurability())); } - for (Map.Entry> entry : PackagePrivateFieldAccessor + for (Map.Entry> entry : ClientInternalHelper .getExtendedFamilyCellMap(in).entrySet()) { byte[] family = entry.getKey(); for (ExtendedCell cell : entry.getValue()) { @@ -1375,7 +1375,7 @@ public static TAppend appendFromHBase(Append in) throws IOException { if (in.getDurability() != Durability.USE_DEFAULT) { out.setDurability(durabilityFromHBase(in.getDurability())); } - for (Map.Entry> entry : PackagePrivateFieldAccessor + for (Map.Entry> entry : ClientInternalHelper .getExtendedFamilyCellMap(in).entrySet()) { byte[] family = entry.getKey(); for (ExtendedCell cell : entry.getValue()) { From d716705699f3d7db36cf49b9c8555a83f28b8279 Mon Sep 17 00:00:00 2001 From: WangXin <1458451310@qq.com> Date: Tue, 30 Jul 2024 10:45:37 +0800 Subject: [PATCH 487/514] HBASE-28758 Remove the aarch64 profile (#6128) Co-authored-by: Little K Signed-off-by: Duo Zhang --- pom.xml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pom.xml b/pom.xml index 044a8c927883..801d4a061670 100644 --- a/pom.xml +++ b/pom.xml @@ -4665,17 +4665,5 @@ - - aarch64 - - - linux - aarch64 - - - - org.openlabtesting.protobuf - - From 71c4054c2e1cd9cd960994c003b2b2176737f576 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 31 Jul 2024 10:08:10 +0800 Subject: [PATCH 488/514] HBASE-28587 Remove deprecated methods in Cell (#6125) Signed-off-by: Xin Sun --- .../hadoop/hbase/filter/KeyOnlyFilter.java | 15 ++-- .../hbase/shaded/protobuf/ProtobufUtil.java | 15 ++-- .../hadoop/hbase/client/TestOperation.java | 13 +++- .../shaded/protobuf/TestProtobufUtil.java | 11 ++- .../java/org/apache/hadoop/hbase/Cell.java | 75 ++----------------- .../hadoop/hbase/CellComparatorImpl.java | 14 +++- .../org/apache/hadoop/hbase/CellUtil.java | 31 +++++--- .../org/apache/hadoop/hbase/ExtendedCell.java | 28 +++---- 
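HBASE-28587 trims the public Cell interface: getTypeByte(), getSequenceId() and the getTags* accessors move to ExtendedCell and RawCell, and bridging helpers are added to PrivateCellUtil (see the PrivateCellUtil hunk later in this patch). A hedged sketch of how server-side code can read those attributes from a Cell reference that may or may not be an ExtendedCell, using only the helpers this patch introduces (cell here is any Cell value):

    // PrivateCellUtil.getTypeByte falls back to cell.getType().getCode() for plain Cells.
    byte typeByte = PrivateCellUtil.getTypeByte(cell);
    Cell.Type type = PrivateCellUtil.code2Type(typeByte);
    // Plain Cells carry no sequence id; the helper reports HConstants.NO_SEQNUM for them.
    long seqId = PrivateCellUtil.getSequenceId(cell);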
.../org/apache/hadoop/hbase/KeyValue.java | 4 +- .../org/apache/hadoop/hbase/KeyValueUtil.java | 59 --------------- .../hadoop/hbase/MetaCellComparator.java | 7 +- .../apache/hadoop/hbase/PrivateCellUtil.java | 34 ++++++++- .../java/org/apache/hadoop/hbase/RawCell.java | 21 ++++++ .../hadoop/hbase/codec/KeyValueCodec.java | 4 +- .../hbase/codec/KeyValueCodecWithTags.java | 4 +- .../hadoop/hbase/io/encoding/NoneEncoder.java | 3 +- .../apache/hadoop/hbase/TestCellBuilder.java | 4 +- .../hbase/TestIndividualBytesFieldCell.java | 4 +- .../org/apache/hadoop/hbase/TestKeyValue.java | 4 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 6 +- .../hbase/regionserver/RSRpcServices.java | 2 +- .../regionserver/ReplicationSinkService.java | 7 +- .../hbase/regionserver/StoreFileReader.java | 7 +- .../hbase/regionserver/wal/WALCellCodec.java | 4 +- .../hbase/replication/BulkLoadCellFilter.java | 4 +- .../NamespaceTableCfWALEntryFilter.java | 6 +- .../ReplicationSinkServiceImpl.java | 4 +- .../replication/ScopeWALEntryFilter.java | 5 +- .../regionserver/ReplicationSink.java | 11 +-- .../security/access/AccessController.java | 2 +- .../apache/hadoop/hbase/wal/WALSplitUtil.java | 3 +- .../hadoop/hbase/io/hfile/TestHFile.java | 2 +- .../regionserver/TestBulkLoadReplication.java | 6 +- .../TestBulkLoadReplicationHFileRefs.java | 6 +- .../hbase/regionserver/TestHRegion.java | 5 +- .../TestMemStoreSegmentsIterator.java | 6 +- .../hadoop/hbase/regionserver/TestTags.java | 8 +- .../regionserver/TestReplicationSink.java | 68 +++++++++-------- .../regionserver/TestWALEntrySinkFilter.java | 18 +++-- .../util/LoadTestDataGeneratorWithTags.java | 7 +- .../hadoop/hbase/wal/TestWALSplitToHFile.java | 15 ++-- 41 files changed, 263 insertions(+), 289 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index 3cbd2771a62d..ef3687482f4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -26,6 +26,7 @@ import java.util.Optional; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -68,11 +69,15 @@ public boolean filterRowKey(Cell cell) throws IOException { } @Override - public Cell transformCell(Cell cell) { - return createKeyOnlyCell(cell); + public Cell transformCell(Cell cell) throws IOException { + if (cell instanceof ExtendedCell) { + return createKeyOnlyCell((ExtendedCell) cell); + } + throw new DoNotRetryIOException( + "Customized cell implementation is not support: " + cell.getClass().getName()); } - private Cell createKeyOnlyCell(Cell c) { + private Cell createKeyOnlyCell(ExtendedCell c) { if (c instanceof ByteBufferExtendedCell) { return new KeyOnlyByteBufferExtendedCell((ByteBufferExtendedCell) c, lenAsVal); } else { @@ -147,11 +152,11 @@ public int hashCode() { } static class KeyOnlyCell implements ExtendedCell { - private Cell cell; + private ExtendedCell cell; private int keyLen; private boolean lenAsVal; - public KeyOnlyCell(Cell c, boolean lenAsVal) { + public KeyOnlyCell(ExtendedCell c, boolean lenAsVal) { this.cell = c; this.lenAsVal = lenAsVal; this.keyLen = KeyValueUtil.keyLength(c); diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 0dec61563494..84bd5e9c08a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -1315,10 +1315,11 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : ClientInternalHelper + .getExtendedFamilyCellMap(mutation).entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - for (Cell cell : family.getValue()) { + for (ExtendedCell cell : family.getValue()) { valueBuilder.clear(); valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); @@ -1420,13 +1421,13 @@ public static ClientProtos.Result toResult(final Result result, boolean encodeTa return toResult(result.getExists(), result.isStale()); } - Cell[] cells = result.rawCells(); + ExtendedCell[] cells = ClientInternalHelper.getExtendedRawCells(result); if (cells == null || cells.length == 0) { return result.isStale() ? EMPTY_RESULT_PB_STALE : EMPTY_RESULT_PB; } ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); - for (Cell c : cells) { + for (ExtendedCell c : cells) { builder.addCell(toCell(c, encodeTags)); } @@ -1980,7 +1981,7 @@ public static void toIOException(ServiceException se) throws IOException { throw new IOException(se); } - public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { + public static CellProtos.Cell toCell(final ExtendedCell kv, boolean encodeTags) { // Doing this is going to kill us if we do it for all data passed. 
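In the ProtobufUtil hunks above, as in the ThriftUtilities hunk earlier in this series, mutation and result serialization stops going through the public Cell-typed accessors and instead pulls ExtendedCell views via ClientInternalHelper, the internal replacement for PackagePrivateFieldAccessor. A rough sketch of that accessor pattern, assuming the helper exposes the family map as Map<byte[], List<ExtendedCell>>, which is how the converters consume it:

    // Iterate a Mutation's cells as ExtendedCell so the type byte and tags stay visible.
    for (Map.Entry<byte[], List<ExtendedCell>> family : ClientInternalHelper
        .getExtendedFamilyCellMap(mutation).entrySet()) {
      byte[] cf = family.getKey();
      for (ExtendedCell cell : family.getValue()) {
        byte typeByte = cell.getTypeByte(); // no longer available on plain Cell
      }
    }
    // Results expose the same extended view of their backing cell array.
    ExtendedCell[] raw = ClientInternalHelper.getExtendedRawCells(result);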
// St.Ack 20121205 CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); @@ -1991,7 +1992,7 @@ public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { ((ByteBufferExtendedCell) kv).getFamilyPosition(), kv.getFamilyLength())); kvbuilder.setQualifier(wrap(((ByteBufferExtendedCell) kv).getQualifierByteBuffer(), ((ByteBufferExtendedCell) kv).getQualifierPosition(), kv.getQualifierLength())); - kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte())); + kvbuilder.setCellType(CellProtos.CellType.forNumber(kv.getTypeByte())); kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); @@ -2006,7 +2007,7 @@ public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { kv.getFamilyLength())); kvbuilder.setQualifier(UnsafeByteOperations.unsafeWrap(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); - kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte())); + kvbuilder.setCellType(CellProtos.CellType.forNumber(kv.getTypeByte())); kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 9ac9a6c3ab91..96feaca575f6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -456,7 +457,8 @@ public void testPutCreationWithByteBuffer() { Assert.assertEquals(1984L, c.get(0).getTimestamp()); Assert.assertArrayEquals(VALUE, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); - Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, + CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue((ExtendedCell) c.get(0)))); p = new Put(ROW); p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2013L, null); @@ -465,7 +467,8 @@ public void testPutCreationWithByteBuffer() { Assert.assertEquals(2013L, c.get(0).getTimestamp()); Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); - Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, + CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue((ExtendedCell) c.get(0)))); p = new Put(ByteBuffer.wrap(ROW)); p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null); @@ -475,7 +478,8 @@ public void testPutCreationWithByteBuffer() { Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); - Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, + 
CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue((ExtendedCell) c.get(0)))); p = new Put(ByteBuffer.wrap(ROW), 1970L); p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null); @@ -485,7 +489,8 @@ public void testPutCreationWithByteBuffer() { Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimestamp()); - Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); + Assert.assertEquals(0, + CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue((ExtendedCell) c.get(0)))); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index acc561812853..ee0a711634de 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -501,8 +501,7 @@ public void testRegionLockInfo() { */ @Test public void testCellConversionWithTags() { - - Cell cell = getCellWithTags(); + ExtendedCell cell = getCellWithTags(); CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); assertNotNull(protoCell); @@ -514,7 +513,7 @@ public void testCellConversionWithTags() { assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); } - private Cell getCellWithTags() { + private ExtendedCell getCellWithTags() { Tag tag = new ArrayBackedTag(TAG_TYPE, TAG_STR); ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); cellBuilder.setRow(Bytes.toBytes("row1")); @@ -539,7 +538,7 @@ private ExtendedCell getCellFromProtoResult(CellProtos.Cell protoCell, boolean d */ @Test public void testCellConversionWithoutTags() { - Cell cell = getCellWithTags(); + ExtendedCell cell = getCellWithTags(); CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); assertNotNull(protoCell); @@ -555,7 +554,7 @@ public void testCellConversionWithoutTags() { */ @Test public void testTagEncodeFalseDecodeTrue() { - Cell cell = getCellWithTags(); + ExtendedCell cell = getCellWithTags(); CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); assertNotNull(protoCell); @@ -571,7 +570,7 @@ public void testTagEncodeFalseDecodeTrue() { */ @Test public void testTagEncodeTrueDecodeFalse() { - Cell cell = getCellWithTags(); + ExtendedCell cell = getCellWithTags(); CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); assertNotNull(protoCell); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java index 027451956ee7..1cbc6cb7497a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java @@ -114,26 +114,15 @@ public interface Cell extends HeapSize { // 5) Type /** - * Return the byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc - * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Use {@link #getType()}. - */ - @Deprecated - byte getTypeByte(); - - // 6) SequenceId - - /** - * A region-specific unique monotonically increasing sequence ID given to each Cell. It always - * exists for cells in the memstore but is not retained forever. 
It will be kept for - * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's - * row is no longer involved in any operations that require strict consistency. - * @return seqId (always > 0 if exists), or 0 if it no longer exists - * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. + * Returns the type of cell in a human readable format using {@link Type}. + *

    + * Note : This does not expose the internal types of Cells like {@link KeyValue.Type#Maximum} and + * {@link KeyValue.Type#Minimum} + * @return The data type this cell: one of Put, Delete, etc */ - @Deprecated - long getSequenceId(); + Type getType(); - // 7) Value + // 6) Value /** * Contiguous raw bytes that may start at any index in the containing array. Max length is @@ -151,48 +140,6 @@ public interface Cell extends HeapSize { /** Returns Serialized size (defaults to include tag length if has some tags). */ int getSerializedSize(); - /** - * Contiguous raw bytes representing tags that may start at any index in the containing array. - * @return the tags byte array - * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are are now internal. - */ - @Deprecated - byte[] getTagsArray(); - - /** - * Return the first offset where the tags start in the Cell - * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are are now internal. - */ - @Deprecated - int getTagsOffset(); - - /** - * HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a - * non-negative number, to make good use of the sign bit, the max of tags length is defined 2 * - * Short.MAX_VALUE + 1 = 65535. As a result, the return type is int, because a short is not - * capable of handling that. Please note that even if the return type is int, the max tags length - * is far less than Integer.MAX_VALUE. - * @return the total length of the tags in the Cell. - * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are are now internal. - */ - @Deprecated - int getTagsLength(); - - /** - * Returns the type of cell in a human readable format using {@link Type}. Note : This does not - * expose the internal types of Cells like {@link KeyValue.Type#Maximum} and - * {@link KeyValue.Type#Minimum} - * @return The data type this cell: one of Put, Delete, etc - */ - default Type getType() { - byte byteType = getTypeByte(); - Type t = Type.CODE_ARRAY[byteType & 0xff]; - if (t != null) { - return t; - } - throw new UnsupportedOperationException("Invalid type of cell " + byteType); - } - /** * The valid types for user to build the cell. Currently, This is subset of {@link KeyValue.Type}. */ @@ -216,13 +163,5 @@ enum Type { public byte getCode() { return this.code; } - - private static final Type[] CODE_ARRAY = new Type[256]; - - static { - for (Type t : Type.values()) { - CODE_ARRAY[t.code & 0xff] = t; - } - } } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 0cd0905cc3a6..0e6a53ca7c47 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -96,8 +96,12 @@ public int compare(final Cell l, final Cell r, boolean ignoreSequenceid) { return diff; } } + + if (ignoreSequenceid) { + return diff; + } // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid ? 
diff : Long.compare(r.getSequenceId(), l.getSequenceId()); + return Long.compare(PrivateCellUtil.getSequenceId(r), PrivateCellUtil.getSequenceId(l)); } private int compareKeyValues(final KeyValue left, final KeyValue right) { @@ -720,11 +724,13 @@ public final int compareWithoutRow(final Cell left, final Cell right) { int rFamLength = right.getFamilyLength(); int lQualLength = left.getQualifierLength(); int rQualLength = right.getQualifierLength(); - if (lFamLength + lQualLength == 0 && left.getTypeByte() == KeyValue.Type.Minimum.getCode()) { + byte leftType = PrivateCellUtil.getTypeByte(left); + byte rightType = PrivateCellUtil.getTypeByte(right); + if (lFamLength + lQualLength == 0 && leftType == KeyValue.Type.Minimum.getCode()) { // left is "bigger", i.e. it appears later in the sorted order return 1; } - if (rFamLength + rQualLength == 0 && right.getTypeByte() == KeyValue.Type.Minimum.getCode()) { + if (rFamLength + rQualLength == 0 && rightType == KeyValue.Type.Minimum.getCode()) { return -1; } if (lFamLength != rFamLength) { @@ -746,7 +752,7 @@ public final int compareWithoutRow(final Cell left, final Cell right) { // of higher numbers sort before those of lesser numbers. Maximum (255) // appears ahead of everything, and minimum (0) appears after // everything. - return (0xff & right.getTypeByte()) - (0xff & left.getTypeByte()); + return (0xff & rightType) - (0xff & leftType); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 10213b143632..3ee3a5159c47 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -570,15 +570,13 @@ public static boolean matchingTags(final Cell left, final Cell right) { * Return true if a delete type, a {@link KeyValue.Type#Delete} or a {KeyValue.Type#DeleteFamily} * or a {@link KeyValue.Type#DeleteColumn} KeyValue type. */ - @SuppressWarnings("deprecation") public static boolean isDelete(final Cell cell) { - return PrivateCellUtil.isDelete(cell.getTypeByte()); + return PrivateCellUtil.isDelete(PrivateCellUtil.getTypeByte(cell)); } /** Returns True if this cell is a Put. 
*/ - @SuppressWarnings("deprecation") public static boolean isPut(Cell cell) { - return cell.getTypeByte() == KeyValue.Type.Put.getCode(); + return PrivateCellUtil.getTypeByte(cell) == KeyValue.Type.Put.getCode(); } /** @@ -629,13 +627,21 @@ public static String getCellKeyAsString(Cell cell, Function rowCon sb.append('/'); sb.append(KeyValue.humanReadableTimestamp(cell.getTimestamp())); sb.append('/'); - sb.append(KeyValue.Type.codeToType(cell.getTypeByte())); + if (cell instanceof ExtendedCell) { + sb.append(KeyValue.Type.codeToType(((ExtendedCell) cell).getTypeByte())); + } else { + sb.append(cell.getType()); + } + if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) { sb.append("/vlen="); sb.append(cell.getValueLength()); } - sb.append("/seqid="); - sb.append(cell.getSequenceId()); + if (cell instanceof ExtendedCell) { + sb.append("/seqid="); + sb.append(((ExtendedCell) cell).getSequenceId()); + } + return sb.toString(); } @@ -651,8 +657,12 @@ public static String toString(Cell cell, boolean verbose) { String value = null; if (verbose) { // TODO: pretty print tags as well - if (cell.getTagsLength() > 0) { - tag = Bytes.toStringBinary(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); + if (cell instanceof RawCell) { + RawCell rawCell = (RawCell) cell; + if (rawCell.getTagsLength() > 0) { + tag = Bytes.toStringBinary(rawCell.getTagsArray(), rawCell.getTagsOffset(), + rawCell.getTagsLength()); + } } if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) { value = @@ -675,7 +685,8 @@ public static String toString(Cell cell, boolean verbose) { public static boolean equals(Cell a, Cell b) { return matchingRows(a, b) && matchingFamily(a, b) && matchingQualifier(a, b) - && matchingTimestamp(a, b) && a.getTypeByte() == b.getTypeByte(); + && matchingTimestamp(a, b) + && PrivateCellUtil.getTypeByte(a) == PrivateCellUtil.getTypeByte(b); } public static boolean matchingTimestamp(Cell a, Cell b) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java index 28e648ec466e..fa470213cd85 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java @@ -147,25 +147,17 @@ default int getChunkId() { */ long getSequenceId(); - /** - * Contiguous raw bytes representing tags that may start at any index in the containing array. - * @return the tags byte array - */ - byte[] getTagsArray(); - - /** Returns the first offset where the tags start in the Cell */ - int getTagsOffset(); + /** Returns The byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc */ + byte getTypeByte(); /** - * HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a - * non-negative number, to make good use of the sign bit, the max of tags length is defined 2 * - * Short.MAX_VALUE + 1 = 65535. As a result, the return type is int, because a short is not - * capable of handling that. Please note that even if the return type is int, the max tags length - * is far less than Integer.MAX_VALUE. - * @return the total length of the tags in the Cell. + * Typically, at server side, you'd better always use the {@link #getTypeByte()} as this method + * does not expose the {@code Maximum} and {@code Minimum} because they will not be returned to + * client, but at server side, we do have cells with these types so if you use this method it will + * cause exceptions. 
*/ - int getTagsLength(); - - /** Returns The byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc */ - byte getTypeByte(); + @Override + default Type getType() { + return PrivateCellUtil.code2Type(getTypeByte()); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index a87a5214fadf..106875a59dde 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -673,7 +673,7 @@ public KeyValue(byte[] row, int roffset, int rlength, byte[] family, int foffset this.offset = 0; } - public KeyValue(Cell c) { + public KeyValue(ExtendedCell c) { this(c.getRowArray(), c.getRowOffset(), c.getRowLength(), c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength(), c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength(), c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), @@ -992,7 +992,7 @@ public int hashCode() { return calculateHashForKey(this); } - private int calculateHashForKey(Cell cell) { + private int calculateHashForKey(ExtendedCell cell) { // pre-calculate the 3 hashes made of byte ranges int rowHash = Bytes.hashCode(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); int familyHash = diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 6c8f2e6e4edb..216e7410d4a4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -22,11 +22,9 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.IOUtils; @@ -708,7 +706,6 @@ public static KeyValue create(final DataInput in) throws IOException { * useful marking a stream as done. 
*/ public static KeyValue create(int length, final DataInput in) throws IOException { - if (length <= 0) { if (length == 0) return null; throw new IOException("Failed read " + length + " bytes, stream corrupt?"); @@ -720,60 +717,4 @@ public static KeyValue create(int length, final DataInput in) throws IOException in.readFully(bytes); return new KeyValue(bytes, 0, length); } - - public static int getSerializedSize(Cell cell, boolean withTags) { - if (withTags) { - return cell.getSerializedSize(); - } - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).getSerializedSize(withTags); - } - return length(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength(), - cell.getValueLength(), cell.getTagsLength(), withTags); - } - - public static int oswrite(final Cell cell, final OutputStream out, final boolean withTags) - throws IOException { - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).write(out, withTags); - } else { - short rlen = cell.getRowLength(); - byte flen = cell.getFamilyLength(); - int qlen = cell.getQualifierLength(); - int vlen = cell.getValueLength(); - int tlen = cell.getTagsLength(); - // write key length - int klen = keyLength(rlen, flen, qlen); - ByteBufferUtils.putInt(out, klen); - // write value length - ByteBufferUtils.putInt(out, vlen); - // Write rowkey - 2 bytes rk length followed by rowkey bytes - StreamUtils.writeShort(out, rlen); - out.write(cell.getRowArray(), cell.getRowOffset(), rlen); - // Write cf - 1 byte of cf length followed by the family bytes - out.write(flen); - out.write(cell.getFamilyArray(), cell.getFamilyOffset(), flen); - // write qualifier - out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qlen); - // write timestamp - StreamUtils.writeLong(out, cell.getTimestamp()); - // write the type - out.write(cell.getTypeByte()); - // write value - out.write(cell.getValueArray(), cell.getValueOffset(), vlen); - int size = klen + vlen + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; - // write tags if we have to - if (withTags && tlen > 0) { - // 2 bytes tags length followed by tags bytes - // tags length is serialized with 2 bytes only(short way) even if the - // type is int. As this - // is non -ve numbers, we save the sign bit. See HBASE-11437 - out.write((byte) (0xff & (tlen >> 8))); - out.write((byte) (0xff & tlen)); - out.write(cell.getTagsArray(), cell.getTagsOffset(), tlen); - size += tlen + KeyValue.TAGS_LENGTH_SIZE; - } - return size; - } - } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java index 43d9e3ee9d7f..2a7d86ded111 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java @@ -24,8 +24,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.primitives.Longs; - /** * A {@link CellComparatorImpl} for hbase:meta catalog table {@link KeyValue}s. */ @@ -92,8 +90,11 @@ public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { return diff; } + if (ignoreSequenceid) { + return diff; + } // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid ? 
diff : Longs.compare(b.getSequenceId(), a.getSequenceId()); + return Long.compare(PrivateCellUtil.getSequenceId(b), PrivateCellUtil.getSequenceId(a)); } @FunctionalInterface diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index bf514d81c5a2..08160145455c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -1648,7 +1648,7 @@ public byte getTypeByte() { } @Override - public Type getType() { + public Cell.Type getType() { throw new UnsupportedOperationException(); } } @@ -3063,4 +3063,36 @@ public boolean advance() { } }; } + + private static final Cell.Type[] CELL_TYPE_CODE_ARRAY = new Cell.Type[256]; + + static { + for (Cell.Type t : Cell.Type.values()) { + CELL_TYPE_CODE_ARRAY[t.getCode() & 0xff] = t; + } + } + + public static Cell.Type code2Type(byte code) { + Cell.Type t = CELL_TYPE_CODE_ARRAY[code & 0xff]; + if (t != null) { + return t; + } + throw new UnsupportedOperationException("Invalid type of cell " + code); + } + + public static byte getTypeByte(Cell c) { + if (c instanceof ExtendedCell) { + return ((ExtendedCell) c).getTypeByte(); + } else { + return c.getType().getCode(); + } + } + + public static long getSequenceId(Cell c) { + if (c instanceof ExtendedCell) { + return ((ExtendedCell) c).getSequenceId(); + } else { + return HConstants.NO_SEQNUM; + } + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index 5ba344770a3d..3b638f28f72b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -33,6 +33,27 @@ public interface RawCell extends Cell { static final int MAX_TAGS_LENGTH = (2 * Short.MAX_VALUE) + 1; + /** + * Contiguous raw bytes representing tags that may start at any index in the containing array. + * @return the tags byte array + */ + byte[] getTagsArray(); + + /** + * Return the first offset where the tags start in the Cell + */ + int getTagsOffset(); + + /** + * HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a + * non-negative number, to make good use of the sign bit, the max of tags length is defined 2 * + * Short.MAX_VALUE + 1 = 65535. As a result, the return type is int, because a short is not + * capable of handling that. Please note that even if the return type is int, the max tags length + * is far less than Integer.MAX_VALUE. + * @return the total length of the tags in the Cell. 
+ */ + int getTagsLength(); + /** * Allows cloning the tags in the cell to a new byte[] * @return the byte[] having the tags diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java index ef40b395b7b1..68fd8f420ed7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java @@ -58,8 +58,8 @@ public KeyValueEncoder(final OutputStream out) { public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Do not write tags over RPC - ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, false)); - KeyValueUtil.oswrite(cell, out, false); + ByteBufferUtils.putInt(this.out, cell.getSerializedSize(false)); + cell.write(out, false); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java index 655bc4c5f261..fc2e63693025 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java @@ -61,8 +61,8 @@ public KeyValueEncoder(final OutputStream out) { public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Write tags - ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, true)); - KeyValueUtil.oswrite(cell, out, true); + ByteBufferUtils.putInt(this.out, cell.getSerializedSize(true)); + cell.write(out, true); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java index 7fb4fd9685e9..f0f63d23c51f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/NoneEncoder.java @@ -21,7 +21,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.io.WritableUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -41,7 +40,7 @@ public int write(ExtendedCell cell) throws IOException { // We write tags seperately because though there is no tag in KV // if the hfilecontext says include tags we need the tags length to be // written - int size = KeyValueUtil.oswrite(cell, out, false); + int size = cell.write(out, false); // Write the additional tag into the stream if (encodingCtx.getHFileContext().isIncludesTags()) { int tagsLength = cell.getTagsLength(); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java index 15ccf3a04554..7860b62dfa70 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java @@ -79,7 +79,7 @@ public void testExtendedCellBuilderWithShallowCopy() { byte[] value = new byte[] { OLD_DATA }; byte[] tags = new byte[] { OLD_DATA }; long seqId = 999; - Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) 
.setFamily(family).setQualifier(qualifier).setType(KeyValue.Type.Put.getCode()) .setValue(value).setTags(tags).setSequenceId(seqId).build(); row[0] = NEW_DATA; @@ -103,7 +103,7 @@ public void testExtendedCellBuilderWithDeepCopy() { byte[] value = new byte[] { OLD_DATA }; byte[] tags = new byte[] { OLD_DATA }; long seqId = 999; - Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) + ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) .setFamily(family).setQualifier(qualifier).setType(KeyValue.Type.Put.getCode()) .setValue(value).setTags(tags).setSequenceId(seqId).build(); row[0] = NEW_DATA; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestIndividualBytesFieldCell.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestIndividualBytesFieldCell.java index 20c279366c36..0a1cf713fea9 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestIndividualBytesFieldCell.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestIndividualBytesFieldCell.java @@ -152,10 +152,10 @@ public void testNullFamilyQualifierValueTags() { byte[] value = null; byte[] tags = null; - Cell ic1 = + ExtendedCell ic1 = new IndividualBytesFieldCell(row, family, qualifier, timestamp, type, seqId, value, tags); - Cell kv1 = new KeyValue(row, family, qualifier, timestamp, type, value, tags); + ExtendedCell kv1 = new KeyValue(row, family, qualifier, timestamp, type, value, tags); byte[] familyArrayInKV = Bytes.copy(kv1.getFamilyArray(), kv1.getFamilyOffset(), kv1.getFamilyLength()); byte[] qualifierArrayInKV = diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index d345dce15ac1..1644a6f1fce7 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -578,8 +578,8 @@ public void testKeyValueSerialization() throws Exception { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); for (KeyValue kv : keyValues) { DataOutputStream os = new DataOutputStream(byteArrayOutputStream); - ByteBufferUtils.putInt(os, KeyValueUtil.getSerializedSize(kv, true)); - KeyValueUtil.oswrite(kv, os, true); + ByteBufferUtils.putInt(os, kv.getSerializedSize(true)); + kv.write(os, true); } DataInputStream is = new DataInputStream(new ByteArrayInputStream(byteArrayOutputStream.toByteArray())); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 888e285f340e..99b1dd112b98 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.mapreduce.Job; import 
org.apache.hadoop.mapreduce.Mapper; @@ -167,8 +169,8 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName()); Put put = null; Delete del = null; - Cell lastCell = null; - for (Cell cell : value.getCells()) { + ExtendedCell lastCell = null; + for (ExtendedCell cell : WALEditInternalHelper.getExtendedCells(value)) { context.getCounter(Counter.CELLS_READ).increment(1); // Filtering WAL meta marker entries. if (WALEdit.isMetaEditFamily(cell)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index ce9cab6bf3b2..8082a2db69c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2216,7 +2216,7 @@ public ReplicateWALEntryResponse replicateWALEntry(final RpcController controlle requestCount.increment(); List entries = request.getEntryList(); checkShouldRejectReplicationRequest(entries); - CellScanner cellScanner = getAndReset(controller); + ExtendedCellScanner cellScanner = getAndReset(controller); server.getRegionServerCoprocessorHost().preReplicateLogEntries(); server.getReplicationSinkService().replicateLogEntries(entries, cellScanner, request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java index 5f893efd88f3..c5b853a23bb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; @@ -40,6 +40,7 @@ public interface ReplicationSinkService extends ReplicationService { * directory required for replicating hfiles * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory */ - void replicateLogEntries(List entries, CellScanner cells, String replicationClusterId, - String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException; + void replicateLogEntries(List entries, ExtendedCellScanner cells, + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index c6e1dfe01718..dff9ac0efe5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -237,7 +237,8 @@ boolean passesBloomFilter(Scan scan, final SortedSet columns) { if (columns != null && columns.size() == 1) { byte[] column = columns.first(); // create the required fake key - Cell kvKey = PrivateCellUtil.createFirstOnRow(row, HConstants.EMPTY_BYTE_ARRAY, column); + ExtendedCell kvKey = 
+ PrivateCellUtil.createFirstOnRow(row, HConstants.EMPTY_BYTE_ARRAY, column); return passesGeneralRowColBloomFilter(kvKey); } @@ -307,14 +308,14 @@ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLe * multi-column query. the cell to check if present in BloomFilter * @return True if passes */ - public boolean passesGeneralRowColBloomFilter(Cell cell) { + public boolean passesGeneralRowColBloomFilter(ExtendedCell cell) { BloomFilter bloomFilter = this.generalBloomFilter; if (bloomFilter == null) { bloomFilterMetrics.incrementEligible(); return true; } // Used in ROW_COL bloom - Cell kvKey = null; + ExtendedCell kvKey = null; // Already if the incoming key is a fake rowcol key then use it as it is if (cell.getTypeByte() == KeyValue.Type.Maximum.getCode() && cell.getFamilyLength() == 0) { kvKey = cell; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 87154a62066c..8645f6054f89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -397,8 +397,8 @@ public EnsureKvEncoder(OutputStream out) { public void write(ExtendedCell cell) throws IOException { checkFlushed(); // Make sure to write tags into WAL - ByteBufferUtils.putInt(this.out, KeyValueUtil.getSerializedSize(cell, true)); - KeyValueUtil.oswrite(cell, this.out, true); + ByteBufferUtils.putInt(this.out, cell.getSerializedSize(true)); + cell.write(out, true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java index c06c6d19a654..9e65ae7c364a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.wal.WALEdit; @@ -49,7 +49,7 @@ public class BulkLoadCellFilter { * @param famPredicate Returns true of given family should be removed. * @return The filtered cell. 
*/ - public Cell filterCell(Cell cell, Predicate famPredicate) { + public ExtendedCell filterCell(ExtendedCell cell, Predicate famPredicate) { byte[] fam; BulkLoadDescriptor bld = null; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java index 82ac9ebd1f32..0f4e62ec9ad9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java @@ -19,6 +19,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; @@ -49,11 +51,13 @@ public Entry filter(Entry entry) { @Override public Cell filterCell(final Entry entry, Cell cell) { + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(cell); ReplicationPeerConfig peerConfig = this.peer.getPeerConfig(); TableName tableName = entry.getKey().getTableName(); if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) { // If the cell is about BULKLOAD event, unpack and filter it by BulkLoadCellFilter. - return bulkLoadFilter.filterCell(cell, fam -> !peerConfig.needToReplicate(tableName, fam)); + return bulkLoadFilter.filterCell(extendedCell, + fam -> !peerConfig.needToReplicate(tableName, fam)); } else { return peerConfig.needToReplicate(tableName, CellUtil.cloneFamily(cell)) ? cell : null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index c8141b683406..acaf5756879f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Stoppable; @@ -56,7 +56,7 @@ public class ReplicationSinkServiceImpl implements ReplicationSinkService { private int statsPeriodInSecond; @Override - public void replicateLogEntries(List entries, CellScanner cells, + public void replicateLogEntries(List entries, ExtendedCellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException { this.replicationSink.replicateEntries(entries, cells, replicationClusterId, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java index 6dc41bcc014a..897e06f4a9fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java @@ -20,7 +20,9 @@ import java.util.NavigableMap; import org.apache.hadoop.hbase.Cell; import 
org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; @@ -50,13 +52,14 @@ private boolean hasGlobalScope(NavigableMap scopes, byte[] fami @Override public Cell filterCell(Entry entry, Cell cell) { + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(cell); NavigableMap scopes = entry.getKey().getReplicationScopes(); if (scopes == null || scopes.isEmpty()) { return null; } byte[] family = CellUtil.cloneFamily(cell); if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) { - return bulkLoadFilter.filterCell(cell, new Predicate() { + return bulkLoadFilter.filterCell(extendedCell, new Predicate() { @Override public boolean apply(byte[] family) { return !hasGlobalScope(scopes, family); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 8610a6d43bd7..ff8adfceec0b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -44,8 +44,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -192,7 +193,7 @@ private void decorateConf() { * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException If failed to replicate the data */ - public void replicateEntries(List entries, final CellScanner cells, + public void replicateEntries(List entries, final ExtendedCellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException { if (entries.isEmpty()) { @@ -225,7 +226,7 @@ public void replicateEntries(List entries, final CellScanner cells, continue; } } - Cell previousCell = null; + ExtendedCell previousCell = null; Mutation mutation = null; int count = entry.getAssociatedCellCount(); for (int i = 0; i < count; i++) { @@ -234,7 +235,7 @@ public void replicateEntries(List entries, final CellScanner cells, this.metrics.incrementFailedBatches(); throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i); } - Cell cell = cells.current(); + ExtendedCell cell = cells.current(); // Handle bulk load hfiles replication if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); @@ -430,7 +431,7 @@ private String getHFilePath(TableName table, BulkLoadDescriptor bld, String stor } /** Returns True if we have crossed over onto a new row or type */ - private boolean isNewRowOrType(final Cell previousCell, final Cell cell) { + private boolean isNewRowOrType(final ExtendedCell previousCell, final ExtendedCell cell) { return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || 
!CellUtil.matchingRows(previousCell, cell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index f0158f299f22..9ccf3c85d611 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -563,7 +563,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce (col.getQualifierLength() == 0 && request == OpType.DELETE) || CellUtil.matchingQualifier(cell, col) ) { - byte type = col.getTypeByte(); + byte type = PrivateCellUtil.getTypeByte(col); if (considerCellTs) { curColCheckTs = col.getTimestamp(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index fab9936165d1..626cefad3f78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellScanner; @@ -490,7 +489,7 @@ public static List getMutationsFromWALEntry(AdminProtos.WALEntry : entry.getKey().getLogSequenceNumber(); int count = entry.getAssociatedCellCount(); List mutations = new ArrayList<>(); - Cell previousCell = null; + ExtendedCell previousCell = null; Mutation m = null; WALKeyImpl key = null; WALEdit val = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index ac9d1fd1fa8d..53c7bca15af9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -892,7 +892,7 @@ public void testGetShortMidpoint() { long ts = 5; KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put); KeyValue kv2 = new KeyValue(Bytes.toBytes("the who test text"), family, qualA, ts, Type.Put); - Cell newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2); + ExtendedCell newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2); assertTrue(keyComparator.compare(kv1, newKey) < 0); assertTrue((keyComparator.compare(kv2, newKey)) > 0); byte[] expectedArray = Bytes.toBytes("the r"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index 7ab7578df1c2..d9aed0e6871d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -36,9 +36,9 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilder; +import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -278,7 +278,7 @@ protected void assertTableNoValue(Table table, byte[] row, byte[] value) throws private String createHFileForFamilies(byte[] row, byte[] value, Configuration clusterConfig) throws IOException { - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); cellBuilder.setRow(row).setFamily(TestReplicationBase.famName).setQualifier(Bytes.toBytes("1")) .setValue(value).setType(Cell.Type.Put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java index bfc80232792f..067b3c45e162 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java @@ -32,9 +32,9 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilder; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -256,7 +256,7 @@ protected void bulkLoadOnCluster(TableName tableName, byte[] family) throws Exce } private String createHFileForFamilies(byte[] family) throws IOException { - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); cellBuilder.setRow(row).setFamily(family).setQualifier(qualifier).setValue(value) .setType(Cell.Type.Put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 99c811e720c0..d923327f5b72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -106,6 +106,7 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; @@ -4839,9 +4840,9 @@ public void doAnAction() throws Exception { } assertTrue(timestamp >= prevTimestamp); prevTimestamp = timestamp; - Cell previousKV = null; + ExtendedCell previousKV = null; - for (Cell kv : result.rawCells()) { + for (ExtendedCell kv : ClientInternalHelper.getExtendedRawCells(result)) { byte[] thisValue = CellUtil.cloneValue(kv); if (previousKV != null) { if (Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue) != 0) { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java index 6f5ef2c10257..e64b4cc60471 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java @@ -25,8 +25,8 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -135,12 +135,12 @@ protected void closeTestSegments(List segments) { protected void verifyNext(MemStoreSegmentsIterator iterator) { // check first cell assertTrue(iterator.hasNext()); - Cell firstCell = iterator.next(); + ExtendedCell firstCell = iterator.next(); assertEquals(LESS_THAN_INTEGER_MAX_VALUE_SEQ_ID, firstCell.getSequenceId()); // check second cell assertTrue(iterator.hasNext()); - Cell secondCell = iterator.next(); + ExtendedCell secondCell = iterator.next(); assertEquals(GREATER_THAN_INTEGER_MAX_VALUE_SEQ_ID, secondCell.getSequenceId()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index b04a0054276c..48d42ae18276 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -311,9 +311,9 @@ public void testFlushAndCompactionWithoutTags() throws Exception { try { Result[] next = scanner.next(3); for (Result result : next) { - CellScanner cellScanner = result.cellScanner(); + ExtendedCellScanner cellScanner = result.cellScanner(); cellScanner.advance(); - Cell current = cellScanner.current(); + ExtendedCell current = cellScanner.current(); assertEquals(0, current.getTagsLength()); } } finally { @@ -328,9 +328,9 @@ public void testFlushAndCompactionWithoutTags() throws Exception { try { Result[] next = scanner.next(3); for (Result result : next) { - CellScanner cellScanner = result.cellScanner(); + ExtendedCellScanner cellScanner = result.cellScanner(); cellScanner.advance(); - Cell current = cellScanner.current(); + ExtendedCell current = cellScanner.current(); assertEquals(0, current.getTagsLength()); } } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java index dc634632c946..bdb51ebe36f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -35,12 +35,12 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileTestUtil; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -165,11 +166,11 @@ public void setUp() throws Exception { @Test public void testBatchSink() throws Exception { List entries = new ArrayList<>(BATCH_SIZE); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); @@ -182,12 +183,12 @@ public void testBatchSink() throws Exception { @Test public void testMixedPutDelete() throws Exception { List entries = new ArrayList<>(BATCH_SIZE / 2); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < BATCH_SIZE / 2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, - baseNamespaceDir, hfileArchiveDir); + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); entries = new ArrayList<>(BATCH_SIZE); cells = new ArrayList<>(); @@ -196,7 +197,7 @@ public void testMixedPutDelete() throws Exception { i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); @@ -206,12 +207,12 @@ public void testMixedPutDelete() throws Exception { @Test public void testLargeEditsPutDelete() throws Exception { List entries = new ArrayList<>(); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < 5510; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, - baseNamespaceDir, hfileArchiveDir); + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); ResultScanner resultScanner = table1.getScanner(new Scan()); int totalRows = 0; @@ -226,8 +227,8 @@ public void testLargeEditsPutDelete() throws Exception { entries.add(createEntry(TABLE_NAME1, i, i % 2 != 0 ? 
KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, - baseNamespaceDir, hfileArchiveDir); + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); resultScanner = table1.getScanner(new Scan()); totalRows = 0; while (resultScanner.next() != null) { @@ -242,12 +243,12 @@ public void testLargeEditsPutDelete() throws Exception { @Test public void testMixedPutTables() throws Exception { List entries = new ArrayList<>(BATCH_SIZE / 2); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table2.getScanner(scan); @@ -266,11 +267,11 @@ public void testMixedPutTables() throws Exception { @Test public void testMixedDeletes() throws Exception { List entries = new ArrayList<>(3); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < 3; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); entries = new ArrayList<>(3); cells = new ArrayList<>(); @@ -278,7 +279,7 @@ public void testMixedDeletes() throws Exception { entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells)); entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells)); - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); @@ -293,7 +294,7 @@ public void testMixedDeletes() throws Exception { @Test public void testApplyDeleteBeforePut() throws Exception { List entries = new ArrayList<>(5); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < 2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } @@ -301,7 +302,7 @@ public void testApplyDeleteBeforePut() throws Exception { for (int i = 3; i < 5; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Get get = new Get(Bytes.toBytes(1)); Result res = table1.get(get); @@ -312,12 +313,12 @@ public void testApplyDeleteBeforePut() throws Exception { public void testRethrowRetriesExhaustedException() throws Exception { TableName notExistTable = TableName.valueOf("notExistTable"); List entries = new ArrayList<>(); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < 10; i++) { entries.add(createEntry(notExistTable, i, KeyValue.Type.Put, cells)); } try { - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + 
SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Assert.fail("Should re-throw TableNotFoundException."); } catch (TableNotFoundException e) { @@ -331,8 +332,9 @@ public void testRethrowRetriesExhaustedException() throws Exception { try (Admin admin = conn.getAdmin()) { admin.disableTable(TABLE_NAME1); try { - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), - replicationClusterId, baseNamespaceDir, hfileArchiveDir); + SINK.replicateEntries(entries, + PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, + baseNamespaceDir, hfileArchiveDir); Assert.fail("Should re-throw RetriesExhaustedWithDetailsException."); } catch (RetriesExhaustedException e) { } finally { @@ -412,7 +414,9 @@ public void testReplicateEntriesForHFiles() throws Exception { assertEquals(0, scanner.next(numRows).length); } // 7. Replicate the bulk loaded entry - SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()), + SINK.replicateEntries(entries, + PrivateCellUtil + .createExtendedCellScanner(WALEditInternalHelper.getExtendedCells(edit).iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); try (ResultScanner scanner = table1.getScanner(new Scan())) { // 8. Assert data is replicated @@ -429,13 +433,13 @@ public void testFailedReplicationSinkMetrics() throws IOException { long initialFailedBatches = SINK.getSinkMetrics().getFailedBatches(); long errorCount = 0L; List entries = new ArrayList<>(BATCH_SIZE); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); for (int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } cells.clear(); // cause IndexOutOfBoundsException try { - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Assert.fail("Should re-throw ArrayIndexOutOfBoundsException."); } catch (ArrayIndexOutOfBoundsException e) { @@ -450,7 +454,7 @@ public void testFailedReplicationSinkMetrics() throws IOException { entries.add(createEntry(notExistTable, i, KeyValue.Type.Put, cells)); } try { - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + SINK.replicateEntries(entries, PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); Assert.fail("Should re-throw TableNotFoundException."); } catch (TableNotFoundException e) { @@ -468,8 +472,9 @@ public void testFailedReplicationSinkMetrics() throws IOException { try (Admin admin = conn.getAdmin()) { admin.disableTable(TABLE_NAME1); try { - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), - replicationClusterId, baseNamespaceDir, hfileArchiveDir); + SINK.replicateEntries(entries, + PrivateCellUtil.createExtendedCellScanner(cells.iterator()), replicationClusterId, + baseNamespaceDir, hfileArchiveDir); Assert.fail("Should re-throw IOException."); } catch (IOException e) { errorCount++; @@ -481,7 +486,8 @@ public void testFailedReplicationSinkMetrics() throws IOException { } } - private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List cells) { + private WALEntry createEntry(TableName table, int row, KeyValue.Type type, + List cells) { byte[] fam = table.equals(TABLE_NAME1) ? 
FAM_NAME1 : FAM_NAME2; byte[] rowBytes = Bytes.toBytes(row); // Just make sure we don't get the same ts for two consecutive rows with diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java index d6c7a0250015..11a0e98c5541 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java @@ -28,10 +28,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilder; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -101,7 +102,7 @@ public void testWALEntryFilter() throws IOException { ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString()); // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos // describing the edit with all Cells from all edits aggregated in a single CellScanner. - final List cells = new ArrayList<>(); + final List cells = new ArrayList<>(); int count = BOUNDARY * 2; for (int i = 0; i < count; i++) { byte[] bytes = Bytes.toBytes(i); @@ -114,20 +115,21 @@ public void testWALEntryFilter() throws IOException { entryBuilder.setAssociatedCellCount(1); entries.add(entryBuilder.build()); // We need to add a Cell per WALEdit to the cells array. - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); // Make cells whose row, family, cell, value, and ts are == 'i'. - Cell cell = cellBuilder.setRow(bytes).setFamily(bytes).setQualifier(bytes) + ExtendedCell cell = cellBuilder.setRow(bytes).setFamily(bytes).setQualifier(bytes) .setType(Cell.Type.Put).setTimestamp(i).setValue(bytes).build(); cells.add(cell); } // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has // all Cells from all the WALEntries made above. - CellScanner cellScanner = new CellScanner() { + ExtendedCellScanner cellScanner = new ExtendedCellScanner() { // Set to -1 because advance gets called before current. 
int index = -1; @Override - public Cell current() { + public ExtendedCell current() { return cells.get(index); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java index dfe39990f4c8..42fb28ccaa0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java @@ -24,7 +24,8 @@ import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellScanner; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; @@ -74,8 +75,8 @@ public Mutation beforeMutate(long rowkeyBase, Mutation m) throws IOException { numTags = minNumTags + rand.nextInt(maxNumTags - minNumTags); } List tags; - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); + for (ExtendedCellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + ExtendedCell cell = cellScanner.current(); byte[] tag = LoadTestDataGenerator.generateData(rand, minTagLength + rand.nextInt(maxTagLength - minTagLength)); tags = new ArrayList<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java index 10ce20049c9e..3dca289cb451 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java @@ -39,12 +39,14 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClientInternalHelper; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Get; @@ -341,10 +343,10 @@ public void testRecoverSequenceId() throws Exception { region.put(new Put(Bytes.toBytes(i)).addColumn(cfd.getName(), QUALIFIER, VALUE1)); Result result = region.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName())); assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER))); - List cells = result.listCells(); - assertEquals(1, cells.size()); + ExtendedCell[] cells = ClientInternalHelper.getExtendedRawCells(result); + assertEquals(1, cells.length); seqIdMap.computeIfAbsent(i, r -> new HashMap<>()).put(cfd.getNameAsString(), - cells.get(0).getSequenceId()); + cells[0].getSequenceId()); } } @@ -362,10 +364,9 @@ public void testRecoverSequenceId() throws Exception { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { Result result = region2.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName())); assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER))); - List cells = 
result.listCells(); - assertEquals(1, cells.size()); - assertEquals((long) seqIdMap.get(i).get(cfd.getNameAsString()), - cells.get(0).getSequenceId()); + ExtendedCell[] cells = ClientInternalHelper.getExtendedRawCells(result); + assertEquals(1, cells.length); + assertEquals((long) seqIdMap.get(i).get(cfd.getNameAsString()), cells[0].getSequenceId()); } } } From 939ab483f2fd910d1653cacea7d81c212e9a32d1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 31 Jul 2024 10:08:28 +0800 Subject: [PATCH 489/514] HBASE-28729 Change the generic type of List in InternalScanner.next (#6126) Signed-off-by: Xin Sun --- .../example/DelegatingInternalScanner.java | 5 +-- .../example/ValueRewritingObserver.java | 10 ++++-- .../example/WriteHeavyIncrementObserver.java | 20 ++++++++--- .../hbase/mapreduce/TestImportExport.java | 2 +- .../hbase/mob/DefaultMobStoreCompactor.java | 2 +- .../hadoop/hbase/regionserver/HRegion.java | 10 +++--- .../hbase/regionserver/InternalScanner.java | 30 +++++++++++++---- .../hbase/regionserver/KeyValueHeap.java | 3 +- .../MemStoreCompactorSegmentsIterator.java | 2 +- .../hbase/regionserver/MobStoreScanner.java | 3 +- .../hbase/regionserver/RegionScanner.java | 7 ++-- .../hbase/regionserver/RegionScannerImpl.java | 33 ++++++++++--------- .../regionserver/ReversedMobStoreScanner.java | 12 +++---- .../hbase/regionserver/StoreFlusher.java | 2 +- .../hbase/regionserver/StoreScanner.java | 5 +-- ...tAvoidCellReferencesIntoShippedBlocks.java | 7 ++-- .../client/TestBlockEvictionFromClient.java | 11 ++++--- .../coprocessor/TestCoprocessorInterface.java | 11 ++++--- .../TestRegionObserverInterface.java | 7 ++-- .../TestRegionObserverScannerOpenHook.java | 4 ++- .../io/hfile/TestScannerFromBucketCache.java | 4 +-- .../mob/TestMobCompactionWithException.java | 5 +-- .../hbase/mob/TestMobStoreCompaction.java | 4 +-- .../DelegatingInternalScanner.java | 5 +-- .../hbase/regionserver/TestBlocksScanned.java | 2 +- ...TestCompactionWithShippingCoprocessor.java | 5 +-- .../hbase/regionserver/TestHMobStore.java | 4 +-- .../hbase/regionserver/TestHRegion.java | 12 +++---- .../hadoop/hbase/regionserver/TestHStore.java | 4 +-- .../hbase/regionserver/TestKeepDeletes.java | 8 ++--- .../regionserver/TestMultiColumnScanner.java | 2 +- .../TestScannerHeartbeatMessages.java | 13 +++++--- .../compactions/TestCompactor.java | 5 +-- .../TestStripeCompactionPolicy.java | 5 +-- .../hbase/util/TestCoprocessorScanPolicy.java | 8 +++-- 35 files changed, 164 insertions(+), 108 deletions(-) diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/DelegatingInternalScanner.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/DelegatingInternalScanner.java index 34f8352c2347..71cb339ecf9d 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/DelegatingInternalScanner.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/DelegatingInternalScanner.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.yetus.audience.InterfaceAudience; @@ -37,7 +37,8 @@ public DelegatingInternalScanner(InternalScanner scanner) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext 
scannerContext) + throws IOException { return scanner.next(result, scannerContext); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java index 35cea912bdb9..a3c4010d69a6 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -74,10 +75,11 @@ public InternalScanner preCompact(ObserverContext CompactionRequest request) { InternalScanner modifyingScanner = new InternalScanner() { @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { boolean ret = scanner.next(result, scannerContext); for (int i = 0; i < result.size(); i++) { - Cell c = result.get(i); + Cell c = (Cell) result.get(i); // Replace the Cell if the value is the one we're replacing if (CellUtil.isPut(c) && comparator.compare(CellUtil.cloneValue(c), sourceValue) == 0) { try { @@ -90,7 +92,9 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE byte[] clonedValue = new byte[replacedValue.length]; System.arraycopy(replacedValue, 0, clonedValue, 0, replacedValue.length); cellBuilder.setValue(clonedValue); - result.set(i, cellBuilder.build()); + // all cells in HBase are ExtendedCells, so you are fine to cast it to ExtendedCell, + // just do not use its methods since it may change without any deprecation cycle + result.set(i, (ExtendedCell) cellBuilder.build()); } finally { cellBuilder.clear(); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java index 5c2db466a767..fcb976a4c535 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; @@ -99,11 +100,14 @@ private InternalScanner wrap(byte[] family, InternalScanner scanner) { private long sum; @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { boolean moreRows = scanner.next(srcResult, scannerContext); if (srcResult.isEmpty()) { if (!moreRows && row != null) { - result.add(createCell(row, family, qualifier, timestamp, sum)); + // all cells in HBase are ExtendedCells, so you are fine to cast it to 
ExtendedCell, + // just do not use its methods since it may change without any deprecation cycle + result.add((ExtendedCell) createCell(row, family, qualifier, timestamp, sum)); } return moreRows; } @@ -114,7 +118,9 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE row = CellUtil.cloneRow(firstCell); qualifier = CellUtil.cloneQualifier(firstCell); } else if (!CellUtil.matchingRows(firstCell, row)) { - result.add(createCell(row, family, qualifier, timestamp, sum)); + // all cells in HBase are ExtendedCells, so you are fine to cast it to ExtendedCell, + // just do not use its methods since it may change without any deprecation cycle + result.add((ExtendedCell) createCell(row, family, qualifier, timestamp, sum)); row = CellUtil.cloneRow(firstCell); qualifier = CellUtil.cloneQualifier(firstCell); sum = 0; @@ -123,14 +129,18 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE if (CellUtil.matchingQualifier(c, qualifier)) { sum += Bytes.toLong(c.getValueArray(), c.getValueOffset()); } else { - result.add(createCell(row, family, qualifier, timestamp, sum)); + // all cells in HBase are ExtendedCells, so you are fine to cast it to ExtendedCell, + // just do not use its methods since it may change without any deprecation cycle + result.add((ExtendedCell) createCell(row, family, qualifier, timestamp, sum)); qualifier = CellUtil.cloneQualifier(c); sum = Bytes.toLong(c.getValueArray(), c.getValueOffset()); } timestamp = c.getTimestamp(); }); if (!moreRows) { - result.add(createCell(row, family, qualifier, timestamp, sum)); + // all cells in HBase are ExtendedCells, so you are fine to cast it to ExtendedCell, + // just do not use its methods since it may change without any deprecation cycle + result.add((ExtendedCell) createCell(row, family, qualifier, timestamp, sum)); } srcResult.clear(); return moreRows; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 63201e857398..43b5fcea8053 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -830,7 +830,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO // Need to use RegionScanner instead of table#getScanner since the latter will // not return tags since it will go through rpc layer and remove tags intentionally. 
RegionScanner scanner = region.getScanner(scan); - scanner.next((List) values); + scanner.next(values); if (!values.isEmpty()) { break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index 5f0538eb7065..f0beea647611 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -362,7 +362,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); do { - hasMore = scanner.next((List) cells, scannerContext); + hasMore = scanner.next(cells, scannerContext); currentTime = EnvironmentEdgeManager.currentTime(); if (LOG.isDebugEnabled()) { now = currentTime; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index b74935ffafff..0750d0b394fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -3297,7 +3297,7 @@ private void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, b try (RegionScanner scanner = getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 - List result = new ArrayList<>(); + List result = new ArrayList<>(); scanner.next(result); if (result.size() < count) { @@ -4070,7 +4070,7 @@ private Map> reckonDeltas(Mutation mutation, for (Map.Entry> entry : ClientInternalHelper .getExtendedFamilyCellMap(mutation).entrySet()) { final byte[] columnFamilyName = entry.getKey(); - List deltas = (List) entry.getValue(); + List deltas = entry.getValue(); // Reckon for the Store what to apply to WAL and MemStore. List toApply = reckonDeltasByStore(region.stores.get(columnFamilyName), mutation, now, deltas, results); @@ -4131,7 +4131,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 List currentValues = new ArrayList<>(); - scanner.next((List) currentValues); + scanner.next(currentValues); // Iterate the input columns and update existing values if they were found, otherwise // add new column initialized to the delta amount int currentValuesIndex = 0; @@ -5063,7 +5063,7 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat try (RegionScanner scanner = getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. 
See HBASE-26036 - List result = new ArrayList<>(1); + List result = new ArrayList<>(1); scanner.next(result); if (filter != null) { if (!result.isEmpty()) { @@ -5079,7 +5079,7 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat matches = (result.get(0).getValueLength() == 0) == (op != CompareOperator.NOT_EQUAL); cellTs = result.get(0).getTimestamp(); } else if (result.size() == 1) { - Cell kv = result.get(0); + ExtendedCell kv = result.get(0); cellTs = kv.getTimestamp(); int compareResult = PrivateCellUtil.compareValue(kv, comparator); matches = matches(op, compareResult); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index 47ff5c38d39c..8534d6255afd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -20,7 +20,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -41,21 +41,37 @@ public interface InternalScanner extends Closeable { /** * Grab the next row's worth of values. - * @param result return output array + *
    + * The generic type for the output list {@code result} means we will only add {@link ExtendedCell} + * to it. This is useful for the code in HBase as we can pass List<ExtendedCell> here to + * avoid casting, but may cause some troubles for coprocessors which implement this method. In + * general, all cells created via the {@link org.apache.hadoop.hbase.CellBuilder} are actually + * {@link ExtendedCell}s, so if you want to add something to the {@code result} list, you can just + * cast it to {@link ExtendedCell}, although it is marked as IA.Private. + * @param result return output array. We will only add ExtendedCell to this list, but for CP + * users, you'd better just use {@link org.apache.hadoop.hbase.RawCell} as + * {@link ExtendedCell} is IA.Private. * @return true if more rows exist after this one, false if scanner is done - * @throws IOException e */ - default boolean next(List result) throws IOException { + default boolean next(List result) throws IOException { return next(result, NoLimitScannerContext.getInstance()); } /** * Grab the next row's worth of values. - * @param result return output array + *
    + * The generic type for the output list {@code result} means we will only add {@link ExtendedCell} + * to it. This is useful for the code in HBase as we can pass List<ExtendedCell> here to + * avoid casting, but may cause some troubles for coprocessors which implement this method. In + * general, all cells created via the {@link org.apache.hadoop.hbase.CellBuilder} are actually + * {@link ExtendedCell}s, so if you want to add something to the {@code result} list, you can just + * cast it to {@link ExtendedCell}, although it is marked as IA.Private. + * @param result return output array. We will only add ExtendedCell to this list, but for CP + * users, you'd better just use {@link org.apache.hadoop.hbase.RawCell} as + * {@link ExtendedCell} is IA.Private. * @return true if more rows exist after this one, false if scanner is done - * @throws IOException e */ - boolean next(List result, ScannerContext scannerContext) throws IOException; + boolean next(List result, ScannerContext scannerContext) throws IOException; /** * Closes the scanner and releases any resources it has allocated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 5fbb680edcd7..6fd030c13c25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -144,7 +144,8 @@ public ExtendedCell next() throws IOException { * @return true if more rows exist after this one, false if scanner is done */ @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { if (this.current == null) { return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java index 281dac85a270..dcbb471e714e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java @@ -148,7 +148,7 @@ private boolean refillKVS() { // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but // all the server side implementation should only add ExtendedCell to the List, otherwise it // will cause serious assertions in our code - hasMore = compactingScanner.next((List) kvs, scannerContext); + hasMore = compactingScanner.next(kvs, scannerContext); } catch (IOException e) { // should not happen as all data are in memory throw new IllegalStateException(e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java index 9de37c3f40cc..0d6cfb2b2112 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; import 
org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; @@ -64,7 +63,7 @@ public MobStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, * the mob file as the result. */ @Override - public boolean next(List outResult, ScannerContext ctx) throws IOException { + public boolean next(List outResult, ScannerContext ctx) throws IOException { boolean result = super.next(outResult, ctx); if (!rawMobScan) { // retrieve the mob data diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java index cea136a9a057..aed08ebd84d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -78,7 +78,7 @@ default String getOperationId() { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean nextRaw(List result) throws IOException; + boolean nextRaw(List result) throws IOException; /** * Grab the next row's worth of values. The {@link ScannerContext} is used to enforce and track @@ -109,5 +109,6 @@ default String getOperationId() { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean nextRaw(List result, ScannerContext scannerContext) throws IOException; + boolean nextRaw(List result, ScannerContext scannerContext) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index 81b5f6a6d70c..36acf678ebac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -76,7 +76,7 @@ class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { * If the joined heap data gathering is interrupted due to scan limits, this will contain the row * for which we are populating the values. */ - protected Cell joinedContinuationRow = null; + protected ExtendedCell joinedContinuationRow = null; private boolean filterClosed = false; protected final byte[] stopRow; @@ -239,14 +239,14 @@ protected final void resetFilters() throws IOException { } @Override - public boolean next(List outResults) throws IOException { + public boolean next(List outResults) throws IOException { // apply the batching limit by default return next(outResults, defaultScannerContext); } @Override - public synchronized boolean next(List outResults, ScannerContext scannerContext) - throws IOException { + public synchronized boolean next(List outResults, + ScannerContext scannerContext) throws IOException { if (this.filterClosed) { throw new UnknownScannerException("Scanner was closed (timed out?) " + "after we renewed it. 
Could be caused by a very slow scanner " @@ -261,13 +261,14 @@ public synchronized boolean next(List outResults, ScannerContext scannerCo } @Override - public boolean nextRaw(List outResults) throws IOException { + public boolean nextRaw(List outResults) throws IOException { // Use the RegionScanner's context by default return nextRaw(outResults, defaultScannerContext); } @Override - public boolean nextRaw(List outResults, ScannerContext scannerContext) throws IOException { + public boolean nextRaw(List outResults, ScannerContext scannerContext) + throws IOException { if (storeHeap == null) { // scanner is closed throw new UnknownScannerException("Scanner was closed"); @@ -278,7 +279,7 @@ public boolean nextRaw(List outResults, ScannerContext scannerContext) thr // to handle scan or get operation. moreValues = nextInternal(outResults, scannerContext); } else { - List tmpList = new ArrayList<>(); + List tmpList = new ArrayList<>(); moreValues = nextInternal(tmpList, scannerContext); outResults.addAll(tmpList); } @@ -302,8 +303,8 @@ public boolean nextRaw(List outResults, ScannerContext scannerContext) thr } /** Returns true if more cells exist after this batch, false if scanner is done */ - private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) - throws IOException { + private boolean populateFromJoinedHeap(List results, + ScannerContext scannerContext) throws IOException { assert joinedContinuationRow != null; boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); @@ -314,7 +315,7 @@ private boolean populateFromJoinedHeap(List results, ScannerContext scanne } // As the data is obtained from two independent heaps, we need to // ensure that result list is sorted, because Result relies on that. - results.sort(comparator); + ((List) results).sort(comparator); return moreValues; } @@ -324,8 +325,8 @@ private boolean populateFromJoinedHeap(List results, ScannerContext scanne * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. * @return state of last call to {@link KeyValueHeap#next()} */ - private boolean populateResult(List results, KeyValueHeap heap, - ScannerContext scannerContext, Cell currentRowCell) throws IOException { + private boolean populateResult(List results, KeyValueHeap heap, + ScannerContext scannerContext, ExtendedCell currentRowCell) throws IOException { Cell nextKv; boolean moreCellsInRow = false; boolean tmpKeepProgress = scannerContext.getKeepProgress(); @@ -411,7 +412,7 @@ private void resetProgress(ScannerContext scannerContext, int initialBatchProgre } } - private boolean nextInternal(List results, ScannerContext scannerContext) + private boolean nextInternal(List results, ScannerContext scannerContext) throws IOException { Preconditions.checkArgument(results.isEmpty(), "First parameter should be an empty list"); Preconditions.checkArgument(scannerContext != null, "Scanner context cannot be null"); @@ -480,7 +481,7 @@ private boolean nextInternal(List results, ScannerContext scannerContext) // First, check if we are at a stop row. If so, there are no more results. if (shouldStop) { if (hasFilterRow) { - filter.filterRowCells(results); + filter.filterRowCells((List) results); } return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } @@ -535,7 +536,7 @@ private boolean nextInternal(List results, ScannerContext scannerContext) // First filter with the filterRow(List). 
FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; if (hasFilterRow) { - ret = filter.filterRowCellsWithRet(results); + ret = filter.filterRowCellsWithRet((List) results); // We don't know how the results have changed after being filtered. Must set progress // according to contents of results now. @@ -546,7 +547,7 @@ private boolean nextInternal(List results, ScannerContext scannerContext) scannerContext.clearProgress(); } scannerContext.incrementBatchProgress(results.size()); - for (Cell cell : results) { + for (ExtendedCell cell : (List) results) { scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), cell.heapSize()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java index 81a4cc467f98..398b716fda69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; @@ -63,7 +62,7 @@ public class ReversedMobStoreScanner extends ReversedStoreScanner { * the mob file as the result. */ @Override - public boolean next(List outResult, ScannerContext ctx) throws IOException { + public boolean next(List outResult, ScannerContext ctx) throws IOException { boolean result = super.next(outResult, ctx); if (!rawMobScan) { // retrieve the mob data @@ -73,11 +72,10 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException long mobKVCount = 0; long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { - Cell cell = outResult.get(i); - assert cell instanceof ExtendedCell; - if (MobUtils.isMobReferenceCell((ExtendedCell) cell)) { - MobCell mobCell = mobStore.resolve((ExtendedCell) cell, cacheMobBlocks, readPt, - readEmptyValueOnMobCellMiss); + ExtendedCell cell = (ExtendedCell) outResult.get(i); + if (MobUtils.isMobReferenceCell(cell)) { + MobCell mobCell = + mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); mobKVCount++; mobKVSize += mobCell.getCell().getValueLength(); outResult.set(i, mobCell.getCell()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index c0efe4074a28..569239bb40a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -134,7 +134,7 @@ protected void performFlush(InternalScanner scanner, CellSink sink, // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but // all the server side implementation should only add ExtendedCell to the List, otherwise it // will cause serious assertions in our code - hasMore = scanner.next((List) kvs, scannerContext); + hasMore = scanner.next(kvs, scannerContext); if (!kvs.isEmpty()) { for (ExtendedCell c : kvs) { sink.append(c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 7aa17d3233a9..016d503f5eab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -547,7 +547,8 @@ public boolean seek(ExtendedCell key) throws IOException { * @return true if there are more rows, false if scanner is done */ @Override - public boolean next(List outResult, ScannerContext scannerContext) throws IOException { + public boolean next(List outResult, ScannerContext scannerContext) + throws IOException { if (scannerContext == null) { throw new IllegalArgumentException("Scanner context cannot be null"); } @@ -830,7 +831,7 @@ private void updateMetricsStore(boolean memstoreRead) { * @param outResult the cells which are visible for user scan * @return null is the top cell doesn't change. Otherwise, the NextState to return */ - private NextState needToReturn(List outResult) { + private NextState needToReturn(List outResult) { if (!outResult.isEmpty() && topChanged) { return heap.peek() == null ? NextState.NO_MORE_VALUES : NextState.MORE_VALUES; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index 520d200f28a0..1017d265aeec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -275,9 +276,11 @@ public CompactorInternalScanner(InternalScanner scanner) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { boolean next = scanner.next(result, scannerContext); - for (Cell cell : result) { + for (Iterator iter = result.iterator(); iter.hasNext();) { + Cell cell = (Cell) iter.next(); if (CellComparatorImpl.COMPARATOR.compareRows(cell, ROW2, 0, ROW2.length) == 0) { try { // hold the compaction diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index 86df2bab8d6a..d2e90e0be43e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -1488,22 +1489,24 @@ public CustomScanner(RegionScanner delegate) { } @Override - public boolean next(List results) throws IOException { + public boolean next(List results) throws IOException { return 
delegate.next(results); } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return delegate.next(result, scannerContext); } @Override - public boolean nextRaw(List result) throws IOException { + public boolean nextRaw(List result) throws IOException { return delegate.nextRaw(result); } @Override - public boolean nextRaw(List result, ScannerContext context) throws IOException { + public boolean nextRaw(List result, ScannerContext context) + throws IOException { boolean nextRaw = delegate.nextRaw(result, context); if (compactionLatch != null && compactionLatch.getCount() > 0) { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index 195eb9000936..2ba46d072c4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -92,22 +93,24 @@ public CustomScanner(RegionScanner delegate) { } @Override - public boolean next(List results) throws IOException { + public boolean next(List results) throws IOException { return delegate.next(results); } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return delegate.next(result, scannerContext); } @Override - public boolean nextRaw(List result) throws IOException { + public boolean nextRaw(List result) throws IOException { return delegate.nextRaw(result); } @Override - public boolean nextRaw(List result, ScannerContext context) throws IOException { + public boolean nextRaw(List result, ScannerContext context) + throws IOException { return delegate.nextRaw(result, context); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 3787acbbf252..f1c7d0770a08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -31,10 +31,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; @@ -589,8 +589,9 @@ public InternalScanner preCompact(ObserverContext return new InternalScanner() { @Override - public boolean next(List results, ScannerContext scannerContext) throws IOException { - List internalResults = new ArrayList<>(); + public 
boolean next(List results, ScannerContext scannerContext) + throws IOException { + List internalResults = new ArrayList<>(); boolean hasMore; do { hasMore = scanner.next(internalResults, scannerContext); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index a1ab261cb6b0..beca58744a9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -142,7 +143,8 @@ public void preScannerOpen(ObserverContext c, Scan private static final InternalScanner NO_DATA = new InternalScanner() { @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index ec1ebfd9d633..dd2bf6a27363 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -211,7 +211,7 @@ public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException { actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); // Verify result for (int i = 0; i < expected.size(); i++) { @@ -289,7 +289,7 @@ private List performScan(byte[] row1, byte[] fam1) throws IOExcept List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); return actual; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithException.java index 60e967005e8e..192b0e31fb02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithException.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -233,7 +233,8 @@ protected boolean performCompaction(FileDetails fd, final InternalScanner scanne private int count = -1; @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { 
count++; if (count == rowCount - 1 && testException) { count = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java index 835d8b83d69e..bac1176b99ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java @@ -377,7 +377,7 @@ private int countMobRows() throws IOException { List results = new ArrayList<>(); boolean hasMore = true; while (hasMore) { - hasMore = scanner.next((List) results); + hasMore = scanner.next(results); for (ExtendedCell c : results) { if (MobUtils.isMobReferenceCell(c)) { scannedCount++; @@ -408,7 +408,7 @@ private int countReferencedMobFiles() throws IOException { Set files = new HashSet<>(); do { kvs.clear(); - hasMore = scanner.next((List) kvs); + hasMore = scanner.next(kvs); for (Cell kv : kvs) { if (!MobUtils.isMobReferenceCell((ExtendedCell) kv)) { continue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java index b7ba8086280c..2f18e8b248f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -32,7 +32,8 @@ public DelegatingInternalScanner(InternalScanner scanner) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return scanner.next(result, scannerContext); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 0d0b2d087832..682dfeb1db21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -115,7 +115,7 @@ private void _testBlocksScanned(TableDescriptor td) throws Exception { InternalScanner s = r.getScanner(scan); List results = new ArrayList<>(); - while (s.next((List) results)) + while (s.next(results)) ; s.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithShippingCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithShippingCoprocessor.java index d8c5c8aa5959..e448f383426c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithShippingCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithShippingCoprocessor.java @@ -24,7 +24,7 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.HConstants; @@ -126,7 +126,8 @@ public ShippedObservingScanner(InternalScanner scanner) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return scanner.next(result, scannerContext); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index c9e6cd83ec6f..0ec1f75e2690 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -312,7 +312,7 @@ public void testGetReferencesFromFiles() throws IOException { scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0); List results = new ArrayList<>(); - scanner.next((List) results); + scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); @@ -400,7 +400,7 @@ public void testMobCellSizeThreshold() throws IOException { scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0); List results = new ArrayList<>(); - scanner.next((List) results); + scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index d923327f5b72..3c75c8431159 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -3791,7 +3791,7 @@ public void testRegionScanner_Next() throws IOException { expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); res = new ArrayList<>(); - is.next((List) res); + is.next(res); for (int i = 0; i < res.size(); i++) { assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); } @@ -3802,7 +3802,7 @@ public void testRegionScanner_Next() throws IOException { expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); res = new ArrayList<>(); - is.next((List) res); + is.next(res); for (int i = 0; i < res.size(); i++) { assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); } @@ -3909,7 +3909,7 @@ public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws I scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); // Verify result @@ -3986,7 +3986,7 @@ public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() scan.readVersions(versions); List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); // Verify result @@ -4096,7 +4096,7 @@ public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOExcept scan.readVersions(MAX_VERSIONS); List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); // Verify 
result @@ -4212,7 +4212,7 @@ public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws I scan.readVersions(versions); List actual = new ArrayList<>(); try (InternalScanner scanner = region.getScanner(scan)) { - boolean hasNext = scanner.next((List) actual); + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); // Verify result diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 13f2101c0040..80e096632236 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -2576,7 +2576,7 @@ public void testClearSnapshotGetScannerConcurrently() throws Exception { assertNull(segmentScanner.next()); } else { List results = new ArrayList<>(); - storeScanner.next((List) results); + storeScanner.next(results); assertEquals(2, results.size()); PrivateCellUtil.equals(smallCell, results.get(0)); PrivateCellUtil.equals(largeCell, results.get(1)); @@ -2713,7 +2713,7 @@ public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception { assertTrue(storeScanner.currentScanners.get(0) instanceof StoreFileScanner); List results = new ArrayList<>(); - storeScanner.next((List) results); + storeScanner.next(results); assertEquals(2, results.size()); PrivateCellUtil.equals(smallCell, results.get(0)); PrivateCellUtil.equals(largeCell, results.get(1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index fdaa19dc3e5a..85503a56e095 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -351,7 +351,7 @@ public void testRawScan() throws Exception { s.readAllVersions(); InternalScanner scan = region.getScanner(s); List kvs = new ArrayList<>(); - scan.next((List) kvs); + scan.next(kvs); assertEquals(8, kvs.size()); assertTrue(PrivateCellUtil.isDeleteFamily(kvs.get(0))); assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T3); @@ -370,7 +370,7 @@ public void testRawScan() throws Exception { s.setTimeRange(0, 1); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next((List) kvs); + scan.next(kvs); // nothing in this interval, not even delete markers assertTrue(kvs.isEmpty()); @@ -381,7 +381,7 @@ public void testRawScan() throws Exception { s.setTimeRange(0, ts + 2); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next((List) kvs); + scan.next(kvs); assertEquals(4, kvs.size()); assertTrue(PrivateCellUtil.isDeleteFamily(kvs.get(0))); assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T1); @@ -396,7 +396,7 @@ public void testRawScan() throws Exception { s.setTimeRange(ts + 3, ts + 5); scan = region.getScanner(s); kvs = new ArrayList<>(); - scan.next((List) kvs); + scan.next(kvs); assertEquals(2, kvs.size()); assertArrayEquals(CellUtil.cloneValue(kvs.get(0)), T3); assertTrue(CellUtil.isDelete(kvs.get(1))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 0db4175916df..1de37bcb018f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -218,7 +218,7 @@ public void testMultiColumnScanner() throws IOException { String queryInfo = "columns queried: " + qualSet + " (columnBitMask=" + columnBitMask + "), maxVersions=" + maxVersions; - while (scanner.next((List) results) || results.size() > 0) { + while (scanner.next(results) || results.size() > 0) { for (ExtendedCell kv : results) { while ( kvPos < kvs.size() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index 9c6b947793b4..ce07308c2e79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -569,7 +570,8 @@ private static class HeartbeatReversedRegionScanner extends ReversedRegionScanne } @Override - public boolean nextRaw(List outResults, ScannerContext context) throws IOException { + public boolean nextRaw(List outResults, ScannerContext context) + throws IOException { boolean moreRows = super.nextRaw(outResults, context); HeartbeatHRegion.rowSleep(); return moreRows; @@ -598,7 +600,8 @@ private static class HeartbeatRegionScanner extends RegionScannerImpl { } @Override - public boolean nextRaw(List outResults, ScannerContext context) throws IOException { + public boolean nextRaw(List outResults, ScannerContext context) + throws IOException { boolean moreRows = super.nextRaw(outResults, context); HeartbeatHRegion.rowSleep(); return moreRows; @@ -630,7 +633,8 @@ public HeartbeatKVHeap(List scanners, CellComparator } @Override - public boolean next(List result, ScannerContext context) throws IOException { + public boolean next(List result, ScannerContext context) + throws IOException { if (HeartbeatHRegion.sleepBeforeColumnFamily) HeartbeatHRegion.columnFamilySleep(); boolean moreRows = super.next(result, context); if (!HeartbeatHRegion.sleepBeforeColumnFamily) HeartbeatHRegion.columnFamilySleep(); @@ -649,7 +653,8 @@ public HeartbeatReversedKVHeap(List scanners, } @Override - public boolean next(List result, ScannerContext context) throws IOException { + public boolean next(List result, ScannerContext context) + throws IOException { if (HeartbeatHRegion.sleepBeforeColumnFamily) HeartbeatHRegion.columnFamilySleep(); boolean moreRows = super.next(result, context); if (!HeartbeatHRegion.sleepBeforeColumnFamily) HeartbeatHRegion.columnFamilySleep(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java index 028de5066c80..94f27d24492e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java @@ -39,7 +39,7 @@ import java.util.List; import java.util.TreeMap; import org.apache.hadoop.fs.Path; -import 
org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -202,7 +202,8 @@ public Scanner(KeyValue... kvs) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { if (kvs.isEmpty()) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 295d0cc4c2fc..d598d0cfdb77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -49,8 +49,8 @@ import java.util.OptionalLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; @@ -931,7 +931,8 @@ public Scanner(KeyValue... kvs) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { if (kvs.isEmpty()) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index 6b4a87b883f9..9e6d7d651bf8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -286,7 +287,8 @@ private Predicate checkVersion(Cell firstCell, int version) { } @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { boolean moreRows = scanner.next(result, scannerContext); if (result.isEmpty()) { return moreRows; @@ -297,7 +299,7 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE predicate = checkTtl(now, ttl); } if (version != null) { - Predicate vp = checkVersion(result.get(0), version); + Predicate vp = checkVersion((Cell) result.get(0), version); if (predicate != null) { predicate = predicate.and(vp); } else { @@ -305,7 +307,7 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE } } if (predicate != null) { - result.removeIf(predicate); + ((List) result).removeIf(predicate); } return moreRows; } From 9dccc6795237239994afc6cd2cb7340606f8c6ec Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Wed, 31 Jul 2024 21:21:05 +0800 Subject: [PATCH 490/514] HBASE-28389 HBase backup yarn queue parameter 
ignored (#6131) Signed-off-by: Duo Zhang Signed-off-by: Nihal Jain --- .../apache/hadoop/hbase/backup/RestoreDriver.java | 4 ++-- .../hadoop/hbase/backup/impl/BackupCommands.java | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index cb01469c8f18..38b767ecf67e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -124,8 +124,8 @@ private int parseAndRun() throws IOException { if (cmd.hasOption(OPTION_YARN_QUEUE_NAME)) { String queueName = cmd.getOptionValue(OPTION_YARN_QUEUE_NAME); - // Set system property value for MR job - System.setProperty("mapreduce.job.queuename", queueName); + // Set MR job queuename to configuration + getConf().set("mapreduce.job.queuename", queueName); } // parse main restore command options diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 3bb3ed33f34d..66694f4384f4 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -142,6 +142,12 @@ public void execute() throws IOException { throw new IOException(INCORRECT_USAGE); } + if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) { + String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME); + // Set MR job queuename to configuration + getConf().set("mapreduce.job.queuename", queueName); + } + // Create connection conn = ConnectionFactory.createConnection(getConf()); if (requiresNoActiveSession()) { @@ -333,12 +339,6 @@ public void execute() throws IOException { boolean ignoreChecksum = cmdline.hasOption(OPTION_IGNORECHECKSUM); - if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) { - String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME); - // Set system property value for MR job - System.setProperty("mapreduce.job.queuename", queueName); - } - try (BackupAdminImpl admin = new BackupAdminImpl(conn)) { BackupRequest.Builder builder = new BackupRequest.Builder(); BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase())) From edbb145a3f0c6847f444fefdadea5b82d02b0bdb Mon Sep 17 00:00:00 2001 From: Liangjun He Date: Fri, 2 Aug 2024 22:38:43 +0800 Subject: [PATCH 491/514] HBASE-28648 Change the deprecation cycle for RegionObserver.postInstantiateDeleteTracker (#6133) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/coprocessor/RegionObserver.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 21cabcec1f8c..542888e4aedc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1567,7 +1567,8 @@ default List> postAppendBeforeWAL( * @param ctx the environment provided by the region server * @param delTracker the deleteTracker that is created by the QueryMatcher * @return the Delete Tracker - * @deprecated Since 2.0 with out any replacement and will be removed in 3.0 + * @deprecated Since 
2.0.0, will be removed in 4.0.0. Visibility label feature still use this + * method, so it can not be removed in 3.0.0 */ @Deprecated default DeleteTracker postInstantiateDeleteTracker( From 158b6d0af593604bcbc9b2d0e815198e032928c4 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Tue, 6 Aug 2024 06:47:41 -0400 Subject: [PATCH 492/514] HBASE-28346: Expose checkQuota to Coprocessor Endpoints (#6066) Signed-off-by: Nick Dimiduk --- .../RegionCoprocessorEnvironment.java | 52 ++++++++ .../hbase/quotas/DefaultOperationQuota.java | 6 + .../hbase/quotas/NoopOperationQuota.java | 6 + .../hadoop/hbase/quotas/OperationQuota.java | 7 + .../apache/hadoop/hbase/quotas/QuotaUtil.java | 8 ++ .../quotas/RegionServerRpcQuotaManager.java | 44 +------ .../hadoop/hbase/quotas/RpcQuotaManager.java | 92 +++++++++++++ .../regionserver/RegionCoprocessorHost.java | 43 ++++++ .../TestRegionCoprocessorQuotaUsage.java | 122 ++++++++++++++++++ 9 files changed, 342 insertions(+), 38 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RpcQuotaManager.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index e3232db909c2..1bac7a068bf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -26,7 +26,11 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.hadoop.hbase.quotas.OperationQuota; +import org.apache.hadoop.hbase.quotas.RpcQuotaManager; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; import org.apache.hadoop.hbase.regionserver.OnlineRegions; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.yetus.audience.InterfaceAudience; @@ -120,4 +124,52 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment results) { operationSize[OperationType.SCAN.ordinal()] += QuotaUtil.calculateResultSize(results); } + @Override + public void addScanResultCells(final List cells) { + operationSize[OperationType.SCAN.ordinal()] += QuotaUtil.calculateCellsSize(cells); + } + @Override public void addMutation(final Mutation mutation) { operationSize[OperationType.MUTATE.ordinal()] += QuotaUtil.calculateMutationSize(mutation); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java index 736560e6fd17..63cf97188d86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.quotas; import java.util.List; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Result; import org.apache.yetus.audience.InterfaceAudience; @@ -81,4 +82,9 @@ public long getReadAvailable() { public long getReadConsumed() { return 0L; } + + @Override + public void 
addScanResultCells(List cells) { + // no-op + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java index ef0a35fa5892..0d9b48b6074b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.quotas; import java.util.List; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Result; import org.apache.yetus.audience.InterfaceAudience; @@ -88,6 +89,12 @@ void checkScanQuota(ClientProtos.ScanRequest scanRequest, long maxScannerResultS */ void addScanResult(List results); + /** + * Add a scan result in the form of cells. This will be used to calculate the exact quota and have + * a better long-read average size for the next time. + */ + void addScanResultCells(List cells); + /** * Add a mutation result. This will be used to calculate the exact quota and have a better * mutation average size for the next time. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 0da1aa661658..8e267d4e8bf6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -591,6 +591,14 @@ public static long calculateResultSize(final List results) { return size; } + public static long calculateCellsSize(final List cells) { + long size = 0; + for (Cell cell : cells) { + size += cell.getSerializedSize(); + } + return size; + } + /** * Method to enable a table, if not already enabled. This method suppresses * {@link TableNotDisabledException} and {@link TableNotFoundException}, if thrown while enabling diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 92a0cfd5c135..f9a7ccba401b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -43,7 +43,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class RegionServerRpcQuotaManager { +public class RegionServerRpcQuotaManager implements RpcQuotaManager { private static final Logger LOG = LoggerFactory.getLogger(RegionServerRpcQuotaManager.class); private final RegionServerServices rsServices; @@ -154,21 +154,7 @@ public OperationQuota getQuota(final UserGroupInformation ugi, final TableName t return NoopOperationQuota.get(); } - /** - * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the - * available quota and to report the data/usage of the operation. This method is specific to scans - * because estimating a scan's workload is more complicated than estimating the workload of a - * get/put. 
- * @param region the region where the operation will be performed - * @param scanRequest the scan to be estimated against the quota - * @param maxScannerResultSize the maximum bytes to be returned by the scanner - * @param maxBlockBytesScanned the maximum bytes scanned in a single RPC call by the - * scanner - * @param prevBlockBytesScannedDifference the difference between BBS of the previous two next - * calls - * @return the OperationQuota - * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. - */ + @Override public OperationQuota checkScanQuota(final Region region, final ClientProtos.ScanRequest scanRequest, long maxScannerResultSize, long maxBlockBytesScanned, long prevBlockBytesScannedDifference) @@ -195,16 +181,7 @@ public OperationQuota checkScanQuota(final Region region, return quota; } - /** - * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the - * available quota and to report the data/usage of the operation. This method does not support - * scans because estimating a scan's workload is more complicated than estimating the workload of - * a get/put. - * @param region the region where the operation will be performed - * @param type the operation type - * @return the OperationQuota - * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. - */ + @Override public OperationQuota checkBatchQuota(final Region region, final OperationQuota.OperationType type) throws IOException, RpcThrottlingException { switch (type) { @@ -218,17 +195,7 @@ public OperationQuota checkBatchQuota(final Region region, throw new RuntimeException("Invalid operation type: " + type); } - /** - * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the - * available quota and to report the data/usage of the operation. This method does not support - * scans because estimating a scan's workload is more complicated than estimating the workload of - * a get/put. - * @param region the region where the operation will be performed - * @param actions the "multi" actions to perform - * @param hasCondition whether the RegionAction has a condition - * @return the OperationQuota - * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. - */ + @Override public OperationQuota checkBatchQuota(final Region region, final List actions, boolean hasCondition) throws IOException, RpcThrottlingException { @@ -258,7 +225,8 @@ public OperationQuota checkBatchQuota(final Region region, * @return the OperationQuota * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. */ - private OperationQuota checkBatchQuota(final Region region, final int numWrites, + @Override + public OperationQuota checkBatchQuota(final Region region, final int numWrites, final int numReads) throws IOException, RpcThrottlingException { Optional user = RpcServer.getRequestUser(); UserGroupInformation ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RpcQuotaManager.java new file mode 100644 index 000000000000..60392ca3b3f6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RpcQuotaManager.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface RpcQuotaManager { + + /** + * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the + * available quota and to report the data/usage of the operation. This method is specific to scans + * because estimating a scan's workload is more complicated than estimating the workload of a + * get/put. + * @param region the region where the operation will be performed + * @param scanRequest the scan to be estimated against the quota + * @param maxScannerResultSize the maximum bytes to be returned by the scanner + * @param maxBlockBytesScanned the maximum bytes scanned in a single RPC call by the + * scanner + * @param prevBlockBytesScannedDifference the difference between BBS of the previous two next + * calls + * @return the OperationQuota + * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. + */ + OperationQuota checkScanQuota(final Region region, final ClientProtos.ScanRequest scanRequest, + long maxScannerResultSize, long maxBlockBytesScanned, long prevBlockBytesScannedDifference) + throws IOException, RpcThrottlingException; + + /** + * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the + * available quota and to report the data/usage of the operation. This method does not support + * scans because estimating a scan's workload is more complicated than estimating the workload of + * a get/put. + * @param region the region where the operation will be performed + * @param type the operation type + * @return the OperationQuota + * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. + */ + OperationQuota checkBatchQuota(final Region region, final OperationQuota.OperationType type) + throws IOException, RpcThrottlingException; + + /** + * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the + * available quota and to report the data/usage of the operation. This method does not support + * scans because estimating a scan's workload is more complicated than estimating the workload of + * a get/put. 
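+   * The quota is charged according to the number of read and write requests contained in the batch of actions.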
+ * @param region the region where the operation will be performed + * @param actions the "multi" actions to perform + * @param hasCondition whether the RegionAction has a condition + * @return the OperationQuota + * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. + */ + OperationQuota checkBatchQuota(final Region region, final List actions, + boolean hasCondition) throws IOException, RpcThrottlingException; + + /** + * Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the + * available quota and to report the data/usage of the operation. This method does not support + * scans because estimating a scan's workload is more complicated than estimating the workload of + * a get/put. + * @param region the region where the operation will be performed + * @param numWrites number of writes to count against quota + * @param numReads number of reads to count against quota + * @return the OperationQuota + * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded. + */ + OperationQuota checkBatchQuota(final Region region, int numWrites, int numReads) + throws IOException, RpcThrottlingException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 398c596b63f8..c4e68c234077 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -65,6 +65,9 @@ import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.hadoop.hbase.quotas.OperationQuota; +import org.apache.hadoop.hbase.quotas.RpcQuotaManager; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; @@ -83,6 +86,9 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.AbstractReferenceMap; import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.ReferenceMap; +import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** * Implements the coprocessor environment and runtime support for coprocessors loaded within a * {@link Region}. @@ -116,6 +122,7 @@ private static class RegionEnvironment extends BaseEnvironment sharedData; private final MetricRegistry metricRegistry; private final RegionServerServices services; + private final RpcQuotaManager rpcQuotaManager; /** * Constructor @@ -131,6 +138,13 @@ public RegionEnvironment(final RegionCoprocessor impl, final int priority, final this.services = services; this.metricRegistry = MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName()); + // Some unit tests reach this line with services == null, and are okay with rpcQuotaManager + // being null. Let these unit tests succeed. This should not happen in real usage. 
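As an illustration only (not part of this change), a coprocessor endpoint could now charge its own work against the caller's throttle quota through its environment. The helper below is a hypothetical sketch; it assumes only the checkBatchQuota and getRegion calls exposed above, and relies on RpcThrottlingException behaving as in the test further down:

    // Hypothetical endpoint helper: bill a custom read-only RPC as one read for the
    // current RPC user before doing the actual work.
    void checkQuotaForCustomRead(RegionCoprocessorEnvironment env) throws IOException {
      // numWrites = 0, numReads = 1; throws RpcThrottlingException (an IOException)
      // once the caller has exhausted its read quota, so the client sees a quota error.
      env.checkBatchQuota(env.getRegion(), 0, 1);
      // ... run the endpoint's own Get/Scan logic here ...
    }

The observer-style variant, checkBatchQuota(region, OperationQuota.OperationType.GET), is the one exercised by TestRegionCoprocessorQuotaUsage below.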
+ if (services != null) { + this.rpcQuotaManager = services.getRegionServerRpcQuotaManager(); + } else { + this.rpcQuotaManager = null; + } } /** Returns the region */ @@ -186,6 +200,35 @@ public RawCellBuilder getCellBuilder() { // We always do a DEEP_COPY only return RawCellBuilderFactory.create(); } + + @Override + public RpcQuotaManager getRpcQuotaManager() { + return rpcQuotaManager; + } + + @Override + public OperationQuota checkScanQuota(Scan scan, long maxBlockBytesScanned, + long prevBlockBytesScannedDifference) throws IOException, RpcThrottlingException { + ClientProtos.ScanRequest scanRequest = RequestConverter + .buildScanRequest(region.getRegionInfo().getRegionName(), scan, scan.getCaching(), false); + long maxScannerResultSize = + services.getConfiguration().getLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE); + return rpcQuotaManager.checkScanQuota(region, scanRequest, maxScannerResultSize, + maxBlockBytesScanned, prevBlockBytesScannedDifference); + } + + @Override + public OperationQuota checkBatchQuota(Region region, OperationQuota.OperationType type) + throws IOException, RpcThrottlingException { + return rpcQuotaManager.checkBatchQuota(region, type); + } + + @Override + public OperationQuota checkBatchQuota(final Region region, int numWrites, int numReads) + throws IOException, RpcThrottlingException { + return rpcQuotaManager.checkBatchQuota(region, numWrites, numReads); + } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java new file mode 100644 index 000000000000..eeb3eb8bb0f3 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.coprocessor; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.quotas.OperationQuota; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.hadoop.hbase.testclassification.CoprocessorTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MediumTests.class, CoprocessorTests.class }) +public class TestRegionCoprocessorQuotaUsage { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionCoprocessorQuotaUsage.class); + + private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static TableName TABLE_NAME = TableName.valueOf("TestRegionCoprocessorQuotaUsage"); + private static byte[] CF = Bytes.toBytes("CF"); + private static byte[] CQ = Bytes.toBytes("CQ"); + private static Connection CONN; + private static Table TABLE; + private static AtomicBoolean THROTTLING_OCCURRED = new AtomicBoolean(false); + + public static class MyRegionObserver implements RegionObserver { + @Override + public void preGetOp(ObserverContext c, Get get, + List result) throws IOException { + + // For the purposes of this test, we only need to catch a throttle happening once, then + // let future requests pass through so we don't make this test take any longer than necessary + if (!THROTTLING_OCCURRED.get()) { + try { + c.getEnvironment().checkBatchQuota(c.getEnvironment().getRegion(), + OperationQuota.OperationType.GET); + } catch (RpcThrottlingException e) { + THROTTLING_OCCURRED.set(true); + throw e; + } + } + } + } + + public static class MyCoprocessor implements RegionCoprocessor { + RegionObserver observer = new MyRegionObserver(); + + @Override + public Optional getRegionObserver() { + return Optional.of(observer); + } + } + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.setBoolean("hbase.quota.enabled", true); + conf.setInt("hbase.quota.default.user.machine.read.num", 2); + conf.set("hbase.quota.rate.limiter", "org.apache.hadoop.hbase.quotas.FixedIntervalRateLimiter"); + conf.set("hbase.quota.rate.limiter.refill.interval.ms", "300000"); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyCoprocessor.class.getName()); + UTIL.startMiniCluster(3); + byte[][] splitKeys = new byte[8][]; + for (int i = 111; i < 999; i += 111) { + splitKeys[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i)); + } + UTIL.createTable(TABLE_NAME, CF, splitKeys); + CONN = UTIL.getConnection(); + TABLE = CONN.getTable(TABLE_NAME); + TABLE.put(new Put(Bytes.toBytes(String.format("%d", 0))).addColumn(CF, CQ, Bytes.toBytes(0L))); + } + + @AfterClass + public static void 
tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testGet() throws InterruptedException, ExecutionException, IOException { + // Hit the table 5 times which ought to be enough to make a throttle happen + for (int i = 0; i < 5; i++) { + TABLE.get(new Get(Bytes.toBytes("000"))); + } + assertTrue("Throttling did not happen as expected", THROTTLING_OCCURRED.get()); + } +} From be2cf58bee7ddeb134bb97ce0777ab2bb556af62 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 6 Aug 2024 22:20:22 +0800 Subject: [PATCH 493/514] HBASE-28760 Exclude pom file of jaxws-ri in output tarball (#6135) Signed-off-by: Nihal Jain --- hbase-assembly/src/main/assembly/hadoop-three-compat.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml index 244de766ce72..84a1bfa36816 100644 --- a/hbase-assembly/src/main/assembly/hadoop-three-compat.xml +++ b/hbase-assembly/src/main/assembly/hadoop-three-compat.xml @@ -65,6 +65,8 @@ + + com.sun.xml.ws:jaxws-ri:pom org.jruby:jruby-complete com.sun.jersey:* From 6788ff48baef51646fd61cabc37eeb9bf11d6058 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Thu, 8 Aug 2024 19:25:37 +0530 Subject: [PATCH 494/514] HBASE-28250 Bump jruby to 9.4.8.0 to fix snakeyaml CVE (#6127) * Sync code as per irb 1.4.2 * Also provide option to try irb's new functionalities for colorize and autocomplete Signed-off-by: Duo Zhang Signed-off-by: Nick Dimiduk --- hbase-shell/src/main/ruby/irb/hirb.rb | 98 +++++++++++----------- hbase-shell/src/main/ruby/jar-bootstrap.rb | 12 +++ pom.xml | 2 +- 3 files changed, 63 insertions(+), 49 deletions(-) diff --git a/hbase-shell/src/main/ruby/irb/hirb.rb b/hbase-shell/src/main/ruby/irb/hirb.rb index 713cb848c764..73b0ee91a11d 100644 --- a/hbase-shell/src/main/ruby/irb/hirb.rb +++ b/hbase-shell/src/main/ruby/irb/hirb.rb @@ -53,18 +53,21 @@ def initialize(workspace = nil, interactive = true, input_method = nil) $stdout = STDOUT end - def output_value + def output_value(omit = false) # Suppress output if last_value is 'nil' # Otherwise, when user types help, get ugly 'nil' # after all output. - super unless @context.last_value.nil? + super(omit) unless @context.last_value.nil? end - # Copied from irb.rb and overrides the rescue Exception block so the + # Copied from https://github.com/ruby/irb/blob/v1.4.2/lib/irb.rb + # We override the rescue Exception block so the # Shell::exception_handler can deal with the exceptions. def eval_input + exc = nil + @scanner.set_prompt do - |ltype, indent, continue, line_no| + |ltype, indent, continue, line_no| if ltype f = @context.prompt_s elsif continue @@ -80,17 +83,19 @@ def eval_input else @context.io.prompt = p = "" end - if @context.auto_indent_mode + if @context.auto_indent_mode and !@context.io.respond_to?(:auto_indent) unless ltype - ind = prompt(@context.prompt_i, ltype, indent, line_no)[/.*\z/].size + + prompt_i = @context.prompt_i.nil? ? "" : @context.prompt_i + ind = prompt(prompt_i, ltype, indent, line_no)[/.*\z/].size + indent * 2 - p.size ind += 2 if continue @context.io.prompt = p + " " * ind if ind > 0 end end + @context.io.prompt end - @scanner.set_input(@context.io) do + @scanner.set_input(@context.io, context: @context) do signal_status(:IN_INPUT) do if l = @context.io.gets print l if @context.verbose? @@ -101,24 +106,51 @@ def eval_input printf "Use \"exit\" to leave %s\n", @context.ap_name end else - print "\n" + print "\n" if @context.prompting? 
end end l end end + @scanner.set_auto_indent(@context) if @context.auto_indent_mode + @scanner.each_top_level_statement do |line, line_no| signal_status(:IN_EVAL) do begin - line.untaint - @context.evaluate(line, line_no) - output_value if @context.echo? - exc = nil + line.untaint if RUBY_VERSION < '2.7' + if IRB.conf[:MEASURE] && IRB.conf[:MEASURE_CALLBACKS].empty? + IRB.set_measure_callback + end + if IRB.conf[:MEASURE] && !IRB.conf[:MEASURE_CALLBACKS].empty? + result = nil + last_proc = proc{ result = @context.evaluate(line, line_no, exception: exc) } + IRB.conf[:MEASURE_CALLBACKS].inject(last_proc) { |chain, item| + _name, callback, arg = item + proc { + callback.(@context, line, line_no, arg, exception: exc) do + chain.call + end + } + }.call + @context.set_last_value(result) + else + @context.evaluate(line, line_no, exception: exc) + end + if @context.echo? + if assignment_expression?(line) + if @context.echo_on_assignment? + output_value(@context.echo_on_assignment? == :truncate) + end + else + output_value + end + end rescue Interrupt => exc rescue SystemExit, SignalException raise rescue SyntaxError => exc + # HBASE-27726: Ignore SyntaxError to prevent exiting Shell on unexpected syntax. raise exc unless @interactive rescue NameError => exc raise exc unless @interactive @@ -128,43 +160,13 @@ def eval_input # This modifies this copied method from JRuby so that the HBase shell can # manage the exception and set a proper exit code on the process. raise exc + else + exc = nil + next end - if exc - if exc.backtrace && exc.backtrace[0] =~ /irb(2)?(\/.*|-.*|\.rb)?:/ && exc.class.to_s !~ /^IRB/ && - !(SyntaxError === exc) - irb_bug = true - else - irb_bug = false - end - - messages = [] - lasts = [] - levels = 0 - if exc.backtrace - count = 0 - exc.backtrace.each do |m| - m = @context.workspace.filter_backtrace(m) or next unless irb_bug - m = sprintf("%9d: from %s", (count += 1), m) - if messages.size < @context.back_trace_limit - messages.push(m) - elsif lasts.size < @context.back_trace_limit - lasts.push(m).shift - levels += 1 - end - end - end - attr = STDOUT.tty? ? ATTR_TTY : ATTR_PLAIN - print "#{attr[1]}Traceback#{attr[]} (most recent call last):\n" - unless lasts.empty? - puts lasts.reverse - printf "... %d levels...\n", levels if levels > 0 - end - puts messages.reverse - messages = exc.to_s.split(/\n/) - print "#{attr[1]}#{exc.class} (#{attr[4]}#{messages.shift}#{attr[0, 1]})#{attr[]}\n" - puts messages.map {|s| "#{attr[1]}#{s}#{attr[]}\n"} - print "Maybe IRB bug!\n" if irb_bug - end + handle_exception(exc) + @context.workspace.local_variable_set(:_, exc) + exc = nil end end end diff --git a/hbase-shell/src/main/ruby/jar-bootstrap.rb b/hbase-shell/src/main/ruby/jar-bootstrap.rb index 63cb0a755449..e9844cfb223f 100644 --- a/hbase-shell/src/main/ruby/jar-bootstrap.rb +++ b/hbase-shell/src/main/ruby/jar-bootstrap.rb @@ -68,6 +68,8 @@ -h | --help This help. -n | --noninteractive Do not run within an IRB session and exit with non-zero status on first error. + -c | --colorize Enable colorized output. + -a | --autocomplete Enable auto-completion. --top-level-defs Compatibility flag to export HBase shell commands onto Ruby's main object -Dkey=value Pass hbase-*.xml Configuration overrides. 
For example, to @@ -105,6 +107,8 @@ def add_to_configuration(c, arg) ['--help', '-h', GetoptLong::NO_ARGUMENT], ['--debug', '-d', GetoptLong::NO_ARGUMENT], ['--noninteractive', '-n', GetoptLong::NO_ARGUMENT], + ['--colorize', '-c', GetoptLong::NO_ARGUMENT], + ['--autocomplete', '-a', GetoptLong::NO_ARGUMENT], ['--top-level-defs', GetoptLong::NO_ARGUMENT], ['-D', GetoptLong::REQUIRED_ARGUMENT], ['--return-values', '-r', GetoptLong::NO_ARGUMENT] @@ -115,6 +119,8 @@ def add_to_configuration(c, arg) log_level = 'ERROR' @shell_debug = false interactive = true +colorize = false +autocomplete = false full_backtrace = false top_level_definitions = false @@ -132,6 +138,10 @@ def add_to_configuration(c, arg) puts 'Setting DEBUG log level...' when '--noninteractive' interactive = false + when '--colorize' + colorize = true + when '--autocomplete' + autocomplete = true when '--return-values' warn '[INFO] the -r | --return-values option is ignored. we always behave '\ 'as though it was given.' @@ -213,6 +223,8 @@ def debug? IRB.conf[:AP_NAME] = 'hbase' IRB.conf[:PROMPT_MODE] = :CUSTOM IRB.conf[:BACK_TRACE_LIMIT] = 0 unless full_backtrace +IRB.conf[:USE_AUTOCOMPLETE] = autocomplete +IRB.conf[:USE_COLORIZE] = colorize # Create a workspace we'll use across sessions. workspace = @shell.get_workspace diff --git a/pom.xml b/pom.xml index 801d4a061670..cc83c8d5aeb8 100644 --- a/pom.xml +++ b/pom.xml @@ -859,7 +859,7 @@ 2.1.1 2.3.2 3.0.1-b08 - 9.3.13.0 + 9.4.8.0 4.13.2 1.3 1.15.0 From 97de2912396d303b78a0d85552953e3e9955a145 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: Tue, 13 Aug 2024 12:54:29 -0500 Subject: [PATCH 495/514] HBASE-27118 Add security headers to Thrift/HTTP server (#5864) Signed-off-by: Duo Zhang Signed-off-by: Pankaj Signed-off-by: Istvan Toth --- .../hadoop/hbase/http/HttpServerUtil.java | 25 +++ .../apache/hadoop/hbase/rest/RESTServer.java | 28 +-- hbase-thrift/pom.xml | 5 + .../hadoop/hbase/thrift/ThriftServer.java | 8 +- .../hbase/thrift/TestThriftHttpServerSSL.java | 212 ++++++++++++++++++ 5 files changed, 252 insertions(+), 26 deletions(-) create mode 100644 hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServerSSL.java diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java index 686f0861f25a..ecfb32742fd1 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.hbase.http; +import java.util.EnumSet; +import javax.servlet.DispatcherType; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.org.eclipse.jetty.security.ConstraintMapping; import org.apache.hbase.thirdparty.org.eclipse.jetty.security.ConstraintSecurityHandler; +import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.FilterHolder; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler; import org.apache.hbase.thirdparty.org.eclipse.jetty.util.security.Constraint; @@ -29,6 +33,9 @@ */ @InterfaceAudience.Private public final class HttpServerUtil { + + public static final String PATH_SPEC_ANY = "/*"; + /** * Add constraints to a Jetty Context to disallow undesirable Http methods. 
* @param ctxHandler The context to modify @@ -59,6 +66,24 @@ public static void constrainHttpMethods(ServletContextHandler ctxHandler, ctxHandler.setSecurityHandler(securityHandler); } + public static void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, + Configuration conf, String pathSpec) { + FilterHolder holder = new FilterHolder(); + holder.setName("clickjackingprevention"); + holder.setClassName(ClickjackingPreventionFilter.class.getName()); + holder.setInitParameters(ClickjackingPreventionFilter.getDefaultParameters(conf)); + ctxHandler.addFilter(holder, pathSpec, EnumSet.allOf(DispatcherType.class)); + } + + public static void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, + boolean isSecure, String pathSpec) { + FilterHolder holder = new FilterHolder(); + holder.setName("securityheaders"); + holder.setClassName(SecurityHeadersFilter.class.getName()); + holder.setInitParameters(SecurityHeadersFilter.getDefaultParameters(conf, isSecure)); + ctxHandler.addFilter(holder, pathSpec, EnumSet.allOf(DispatcherType.class)); + } + private HttpServerUtil() { } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 42c00480526b..760f2ca8b41c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.http.HttpServerUtil.PATH_SPEC_ANY; + import java.lang.management.ManagementFactory; import java.net.UnknownHostException; import java.util.ArrayList; @@ -31,10 +33,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.http.ClickjackingPreventionFilter; import org.apache.hadoop.hbase.http.HttpServerUtil; import org.apache.hadoop.hbase.http.InfoServer; -import org.apache.hadoop.hbase.http.SecurityHeadersFilter; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.rest.filter.AuthFilter; import org.apache.hadoop.hbase.rest.filter.GzipFilter; @@ -99,8 +99,6 @@ public class RESTServer implements Constants { static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size"; static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE - 1; - private static final String PATH_SPEC_ANY = "/*"; - static final String REST_HTTP_ALLOW_OPTIONS_METHOD = "hbase.rest.http.allow.options.method"; // HTTP OPTIONS method is commonly used in REST APIs for negotiation. So it is enabled by default. 
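For reference, a minimal sketch of how an embedded Jetty context in HBase can pick up the shared filters after this refactor; the wrapper method is illustrative, and only HttpServerUtil members visible in this change (PATH_SPEC_ANY, addClickjackingPreventionFilter, addSecurityHeadersFilter, constrainHttpMethods) are assumed:

    // Illustrative helper: apply the common security filters to an embedded Jetty context.
    static void applySecurityFilters(ServletContextHandler ctxHandler, Configuration conf,
      boolean isSecure, boolean allowOptionsMethod) {
      // Clickjacking prevention filter (X-Frame-Options) for all paths.
      HttpServerUtil.addClickjackingPreventionFilter(ctxHandler, conf, HttpServerUtil.PATH_SPEC_ANY);
      // Security headers filter; isSecure controls the HTTPS-only headers such as HSTS.
      HttpServerUtil.addSecurityHeadersFilter(ctxHandler, conf, isSecure, HttpServerUtil.PATH_SPEC_ANY);
      // Optionally constrain undesirable HTTP methods, as before.
      HttpServerUtil.constrainHttpMethods(ctxHandler, allowOptionsMethod);
    }

This is the same wiring the RESTServer and ThriftServer diffs in this patch perform for their own ServletContextHandler instances.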
private static boolean REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = true; @@ -144,24 +142,6 @@ void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { } } - private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, - Configuration conf) { - FilterHolder holder = new FilterHolder(); - holder.setName("clickjackingprevention"); - holder.setClassName(ClickjackingPreventionFilter.class.getName()); - holder.setInitParameters(ClickjackingPreventionFilter.getDefaultParameters(conf)); - ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); - } - - private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, - boolean isSecure) { - FilterHolder holder = new FilterHolder(); - holder.setName("securityheaders"); - holder.setClassName(SecurityHeadersFilter.class.getName()); - holder.setInitParameters(SecurityHeadersFilter.getDefaultParameters(conf, isSecure)); - ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); - } - // login the server principal (if using secure Hadoop) private static Pair> loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { @@ -397,8 +377,8 @@ public synchronized void run() throws Exception { ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); } addCSRFFilter(ctxHandler, conf); - addClickjackingPreventionFilter(ctxHandler, conf); - addSecurityHeadersFilter(ctxHandler, conf, isSecure); + HttpServerUtil.addClickjackingPreventionFilter(ctxHandler, conf, PATH_SPEC_ANY); + HttpServerUtil.addSecurityHeadersFilter(ctxHandler, conf, isSecure, PATH_SPEC_ANY); HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration() .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml index 720eecb3b4c2..90e40f96e7a4 100644 --- a/hbase-thrift/pom.xml +++ b/hbase-thrift/pom.xml @@ -150,6 +150,11 @@ log4j-1.2-api test + + org.bouncycastle + bcprov-jdk18on + test + diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java index 81887034aea9..7f2d37440297 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.thrift; +import static org.apache.hadoop.hbase.http.HttpServerUtil.PATH_SPEC_ANY; import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT; import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_KEY; import static org.apache.hadoop.hbase.thrift.Constants.BIND_CONF_KEY; @@ -387,9 +388,12 @@ protected void setupHTTPServer() throws IOException { httpServer = new Server(threadPool); // Context handler + boolean isSecure = conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false); ServletContextHandler ctxHandler = new ServletContextHandler(httpServer, "/", ServletContextHandler.SESSIONS); - ctxHandler.addServlet(new ServletHolder(thriftHttpServlet), "/*"); + HttpServerUtil.addClickjackingPreventionFilter(ctxHandler, conf, PATH_SPEC_ANY); + HttpServerUtil.addSecurityHeadersFilter(ctxHandler, conf, isSecure, PATH_SPEC_ANY); + ctxHandler.addServlet(new ServletHolder(thriftHttpServlet), PATH_SPEC_ANY); HttpServerUtil.constrainHttpMethods(ctxHandler, conf.getBoolean(THRIFT_HTTP_ALLOW_OPTIONS_METHOD, 
THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); @@ -404,7 +408,7 @@ protected void setupHTTPServer() throws IOException { httpConfig.setSendDateHeader(false); ServerConnector serverConnector; - if (conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) { + if (isSecure) { HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig); httpsConfig.addCustomizer(new SecureRequestCustomizer()); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServerSSL.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServerSSL.java new file mode 100644 index 000000000000..998d979b50d0 --- /dev/null +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServerSSL.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.thrift; + +import static org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine.createBoundServer; +import static org.junit.Assert.assertEquals; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Method; +import java.net.HttpURLConnection; +import java.nio.file.Files; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.cert.X509Certificate; +import javax.net.ssl.SSLContext; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.thrift.generated.Hbase; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; +import org.apache.hadoop.hbase.util.TableDescriptorChecker; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContexts; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TMemoryBuffer; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; 
+import org.slf4j.LoggerFactory; + +@Category({ ClientTests.class, LargeTests.class }) +public class TestThriftHttpServerSSL { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestThriftHttpServerSSL.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestThriftHttpServerSSL.class); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final String KEY_STORE_PASSWORD = "myKSPassword"; + private static final String TRUST_STORE_PASSWORD = "myTSPassword"; + + private File keyDir; + private HttpClientBuilder httpClientBuilder; + private ThriftServerRunner tsr; + private HttpPost httpPost = null; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean(Constants.USE_HTTP_CONF_KEY, true); + TEST_UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, false); + TEST_UTIL.startMiniCluster(); + // ensure that server time increments every time we do an operation, otherwise + // successive puts having the same timestamp will override each other + EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + EnvironmentEdgeManager.reset(); + } + + @Before + public void setUp() throws Exception { + initializeAlgorithmId(); + keyDir = initKeystoreDir(); + keyDir.deleteOnExit(); + KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); + + X509Certificate serverCertificate = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + + generateTrustStore(serverCertificate); + generateKeyStore(keyPair, serverCertificate); + + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.setBoolean(Constants.THRIFT_SSL_ENABLED_KEY, true); + conf.set(Constants.THRIFT_SSL_KEYSTORE_STORE_KEY, getKeystoreFilePath()); + conf.set(Constants.THRIFT_SSL_KEYSTORE_PASSWORD_KEY, KEY_STORE_PASSWORD); + conf.set(Constants.THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY, KEY_STORE_PASSWORD); + + tsr = createBoundServer(() -> new ThriftServer(conf)); + String url = "https://" + HConstants.LOCALHOST + ":" + tsr.getThriftServer().listenPort; + + KeyStore trustStore; + trustStore = KeyStore.getInstance("JKS"); + try (InputStream inputStream = + new BufferedInputStream(Files.newInputStream(new File(getTruststoreFilePath()).toPath()))) { + trustStore.load(inputStream, TRUST_STORE_PASSWORD.toCharArray()); + } + + httpClientBuilder = HttpClients.custom(); + SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(trustStore, null).build(); + httpClientBuilder.setSSLContext(sslcontext); + + httpPost = new HttpPost(url); + httpPost.setHeader("Content-Type", "application/x-thrift"); + httpPost.setHeader("Accept", "application/x-thrift"); + httpPost.setHeader("User-Agent", "Java/THttpClient/HC"); + } + + @After + public void tearDown() throws IOException { + if (httpPost != null) { + httpPost.releaseConnection(); + } + if (tsr != null) { + tsr.close(); + } + } + + @Test + public void testSecurityHeaders() throws Exception { + try (CloseableHttpClient httpClient = httpClientBuilder.build()) { + TMemoryBuffer memoryBuffer = new TMemoryBuffer(100); + TProtocol prot = new TBinaryProtocol(memoryBuffer); + Hbase.Client client = new Hbase.Client(prot); + client.send_getClusterId(); + + httpPost.setEntity(new ByteArrayEntity(memoryBuffer.getArray())); + CloseableHttpResponse 
httpResponse = httpClient.execute(httpPost); + + assertEquals(HttpURLConnection.HTTP_OK, httpResponse.getStatusLine().getStatusCode()); + assertEquals("DENY", httpResponse.getFirstHeader("X-Frame-Options").getValue()); + + assertEquals("nosniff", httpResponse.getFirstHeader("X-Content-Type-Options").getValue()); + assertEquals("1; mode=block", httpResponse.getFirstHeader("X-XSS-Protection").getValue()); + + assertEquals("default-src https: data: 'unsafe-inline' 'unsafe-eval'", + httpResponse.getFirstHeader("Content-Security-Policy").getValue()); + assertEquals("max-age=63072000;includeSubDomains;preload", + httpResponse.getFirstHeader("Strict-Transport-Security").getValue()); + } + } + + // Workaround for jdk8 292 bug. See https://github.com/bcgit/bc-java/issues/941 + // Below is a workaround described in above URL. Issue fingered first in comments in + // HBASE-25920 Support Hadoop 3.3.1 + private static void initializeAlgorithmId() { + try { + Class algoId = Class.forName("sun.security.x509.AlgorithmId"); + Method method = algoId.getMethod("get", String.class); + method.setAccessible(true); + method.invoke(null, "PBEWithSHA1AndDESede"); + } catch (Exception e) { + LOG.warn("failed to initialize AlgorithmId", e); + } + } + + private File initKeystoreDir() { + String dataTestDir = TEST_UTIL.getDataTestDir().toString(); + File keystoreDir = new File(dataTestDir, TestThriftHttpServer.class.getSimpleName() + "_keys"); + keystoreDir.mkdirs(); + return keystoreDir; + } + + private void generateKeyStore(KeyPair keyPair, X509Certificate serverCertificate) + throws Exception { + String keyStorePath = getKeystoreFilePath(); + KeyStoreTestUtil.createKeyStore(keyStorePath, KEY_STORE_PASSWORD, KEY_STORE_PASSWORD, + "serverKS", keyPair.getPrivate(), serverCertificate); + } + + private void generateTrustStore(X509Certificate serverCertificate) throws Exception { + String trustStorePath = getTruststoreFilePath(); + KeyStoreTestUtil.createTrustStore(trustStorePath, TRUST_STORE_PASSWORD, "serverTS", + serverCertificate); + } + + private String getKeystoreFilePath() { + return String.format("%s/serverKS.%s", keyDir.getAbsolutePath(), "jks"); + } + + private String getTruststoreFilePath() { + return String.format("%s/serverTS.%s", keyDir.getAbsolutePath(), "jks"); + } +} From 98e7e1b9f037e6eefd38a16266317a17b1bd5895 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Fri, 16 Aug 2024 11:00:25 +0530 Subject: [PATCH 496/514] HBASE-28784 Exclude samples and release-documentation zip of jaxws-ri from output tarball (#6157) Signed-off-by: Duo Zhang --- pom.xml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pom.xml b/pom.xml index cc83c8d5aeb8..854e3cdb84b6 100644 --- a/pom.xml +++ b/pom.xml @@ -1809,6 +1809,14 @@ javax.activation javax.activation-api + + com.sun.xml.ws + release-documentation + + + com.sun.xml.ws + samples + From 41dd87cd908d4d089d0b8cff6c88c01ed60622c5 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Sat, 17 Aug 2024 12:09:30 +0530 Subject: [PATCH 497/514] HBASE-28786 Fix classname for command: copyreppeers in bin/hbase (#6162) Signed-off-by: Pankaj --- bin/hbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/hbase b/bin/hbase index ff0c57c055c7..937f7c22f2dd 100755 --- a/bin/hbase +++ b/bin/hbase @@ -763,7 +763,7 @@ elif [ "$COMMAND" = "hbtop" ] ; then elif [ "$COMMAND" = "credential" ] ; then CLASS='org.apache.hadoop.security.alias.CredentialShell' elif [ "$COMMAND" = "copyreppeers" ] ; then - CLASS='org.apache.hadoop.hbase.replication.ReplicationPeerMigrationTool' 
+ CLASS='org.apache.hadoop.hbase.replication.CopyReplicationPeers' else CLASS=$COMMAND if [[ "$CLASS" =~ .*IntegrationTest.* ]] ; then From 0646151cd612389ce5ed3fac035a807525aea390 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 20 Aug 2024 14:28:42 -0700 Subject: [PATCH 498/514] HBASE-27746 Check if the file system supports storage policy before invoking setStoragePolicy() (#5189) --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 7 +++++++ .../java/org/apache/hadoop/hbase/util/TestFSUtils.java | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 800764954569..fe6f3bc238a9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.util; +import static org.apache.hadoop.fs.CommonPathCapabilities.FS_STORAGEPOLICY; + import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; @@ -515,6 +517,11 @@ private static void invokeSetStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy) throws IOException { Exception toThrow = null; + if (!fs.hasPathCapability(path, FS_STORAGEPOLICY)) { + LOG.debug("The file system does not support storage policy."); + return; + } + try { fs.setStoragePolicy(path, storagePolicy); LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index ffd9a0e0a389..8e2ce8dd8c11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -26,6 +26,8 @@ import java.io.File; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.util.List; import java.util.Random; import org.apache.hadoop.conf.Configuration; @@ -415,8 +417,9 @@ public void testSetStoragePolicyDefault() throws Exception { * Note: currently the default policy is set to defer to HDFS and this case is to verify the * logic, will need to remove the check if the default policy is changed */ - private void verifyNoHDFSApiInvocationForDefaultPolicy() { + private void verifyNoHDFSApiInvocationForDefaultPolicy() throws URISyntaxException, IOException { FileSystem testFs = new AlwaysFailSetStoragePolicyFileSystem(); + testFs.initialize(new URI("hdfs://localhost/"), conf); // There should be no exception thrown when setting to default storage policy, which indicates // the HDFS API hasn't been called try { From d2a1f191ccbede54e189696bc46a938a207e4e0a Mon Sep 17 00:00:00 2001 From: Umesh <9414umeshkumar@gmail.com> Date: Sun, 25 Aug 2024 20:36:20 +0530 Subject: [PATCH 499/514] HBASE-28690 Aborting Active HMaster is not rejecting reportRegionStateTransition if procedure is initialised by next Active master (#6129) Added masterActiveTime as fencing token for remote procedures Signed-off-by: Duo Zhang Reviewed-by: Aman Poonia --- .../hbase/shaded/protobuf/ProtobufUtil.java | 4 ++- .../procedure2/RemoteProcedureDispatcher.java | 10 +++++- .../server/master/RegionServerStatus.proto | 5 +++ .../main/protobuf/server/region/Admin.proto | 6 ++++ .../apache/hadoop/hbase/master/HMaster.java | 1 + 
.../hbase/master/MasterRpcServices.java | 34 +++++++++++++++++-- .../hadoop/hbase/master/MasterServices.java | 3 ++ .../assignment/CloseRegionProcedure.java | 5 +-- .../assignment/OpenRegionProcedure.java | 5 +-- .../assignment/RegionRemoteProcedureBase.java | 5 +-- .../procedure/FlushRegionProcedure.java | 5 +-- .../procedure/RSProcedureDispatcher.java | 31 +++++++++-------- .../procedure/SnapshotRegionProcedure.java | 8 +++-- .../procedure/SnapshotVerifyProcedure.java | 5 +-- .../procedure/SplitWALRemoteProcedure.java | 7 ++-- .../SwitchRpcThrottleRemoteProcedure.java | 3 +- .../ClaimReplicationQueueRemoteProcedure.java | 2 +- .../replication/RefreshPeerProcedure.java | 3 +- ...ncReplicationReplayWALRemoteProcedure.java | 5 +-- .../hbase/regionserver/HRegionServer.java | 15 +++++--- .../hbase/regionserver/RSRpcServices.java | 17 ++++++---- .../regionserver/RegionServerServices.java | 20 +++++++++-- .../RemoteProcedureResultReporter.java | 5 +-- .../hbase/regionserver/SplitRequest.java | 2 +- .../handler/AssignRegionHandler.java | 17 +++++++--- .../handler/CloseRegionHandler.java | 2 +- .../handler/OpenRegionHandler.java | 7 ++-- .../handler/RSProcedureHandler.java | 9 +++-- .../handler/UnassignRegionHandler.java | 17 +++++----- .../hbase/master/MockNoopMasterServices.java | 8 +++++ .../procedure/TestServerRemoteProcedure.java | 4 +-- 31 files changed, 192 insertions(+), 78 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 84bd5e9c08a1..427ad98f7fa5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -3097,10 +3097,12 @@ public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte } public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte[] regionName, - ServerName destinationServer, long closeProcId, boolean evictCache) { + ServerName destinationServer, long closeProcId, boolean evictCache, + long initiatingMasterActiveTime) { CloseRegionRequest.Builder builder = getBuilder(server, regionName, destinationServer, closeProcId); builder.setEvictCache(evictCache); + builder.setInitiatingMasterActiveTime(initiatingMasterActiveTime); return builder.build(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index faae19d16ae2..e6a9d8fb2bdf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -222,14 +222,22 @@ protected final void submitTask(Runnable task, long delay, TimeUnit unit) { */ public static abstract class RemoteOperation { private final RemoteProcedure remoteProcedure; + // active time of the master that sent this request, used for fencing + private final long initiatingMasterActiveTime; - protected RemoteOperation(final RemoteProcedure remoteProcedure) { + protected RemoteOperation(final RemoteProcedure remoteProcedure, + long initiatingMasterActiveTime) { this.remoteProcedure = remoteProcedure; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; } public RemoteProcedure getRemoteProcedure() { return remoteProcedure; } + + 
public long getInitiatingMasterActiveTime() { + return initiatingMasterActiveTime; + } } /** diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto index 4ec09991b343..e68ba8e72869 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto @@ -97,6 +97,9 @@ message RegionStateTransition { optional uint64 open_seq_num = 3; repeated int64 proc_id = 4; + + // Master active time as fencing token + optional int64 initiating_master_active_time = 5; enum TransitionCode { OPENED = 0; FAILED_OPEN = 1; @@ -155,6 +158,8 @@ message RemoteProcedureResult { } required Status status = 2; optional ForeignExceptionMessage error = 3; + // Master active time as fencing token + optional int64 initiating_master_active_time = 4; } message ReportProcedureDoneRequest { repeated RemoteProcedureResult result = 1; diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index 308b1a8b6d62..230795f27479 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -80,6 +80,8 @@ message OpenRegionRequest { repeated RegionOpenInfo open_info = 1; // the intended server for this RPC. optional uint64 serverStartCode = 2; + // Master active time as fencing token + optional int64 initiating_master_active_time = 3; // wall clock time from master optional uint64 master_system_time = 5; @@ -123,6 +125,8 @@ message CloseRegionRequest { optional uint64 serverStartCode = 5; optional int64 close_proc_id = 6 [default = -1]; optional bool evict_cache = 7 [default = false]; + // Master active time as fencing token + optional int64 initiating_master_active_time = 8; } message CloseRegionResponse { @@ -272,6 +276,8 @@ message RemoteProcedureRequest { required uint64 proc_id = 1; required string proc_class = 2; optional bytes proc_data = 3; + // Master active time as fencing token + optional int64 initiating_master_active_time = 4; } message ExecuteProceduresRequest { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0f4162cd1f74..c263a383bafd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3153,6 +3153,7 @@ public long getMasterStartTime() { } /** Returns timestamp in millis when HMaster became the active master. 
*/ + @Override public long getMasterActiveTime() { return masterActiveTime; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 1da8e03d179e..faedc6dd628f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerMetrics; @@ -64,7 +65,6 @@ import org.apache.hadoop.hbase.ipc.QosPriority; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; @@ -396,6 +396,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; @@ -1854,6 +1855,15 @@ public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcContro ReportRegionStateTransitionRequest req) throws ServiceException { try { server.checkServiceStarted(); + for (RegionServerStatusProtos.RegionStateTransition transition : req.getTransitionList()) { + long procId = + transition.getProcIdCount() > 0 ? transition.getProcId(0) : Procedure.NO_PROC_ID; + // -1 is less than any possible MasterActiveCode + long initiatingMasterActiveTime = transition.hasInitiatingMasterActiveTime() + ? transition.getInitiatingMasterActiveTime() + : -1; + throwOnOldMaster(procId, initiatingMasterActiveTime); + } return server.getAssignmentManager().reportRegionStateTransition(req); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -2553,8 +2563,14 @@ public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, // Check Masters is up and ready for duty before progressing. Remote side will keep trying. try { this.server.checkServiceStarted(); - } catch (ServerNotRunningYetException snrye) { - throw new ServiceException(snrye); + for (RemoteProcedureResult result : request.getResultList()) { + // -1 is less than any possible MasterActiveCode + long initiatingMasterActiveTime = + result.hasInitiatingMasterActiveTime() ? 
result.getInitiatingMasterActiveTime() : -1; + throwOnOldMaster(result.getProcId(), initiatingMasterActiveTime); + } + } catch (IOException ioe) { + throw new ServiceException(ioe); } request.getResultList().forEach(result -> { if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { @@ -2567,6 +2583,18 @@ public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, return ReportProcedureDoneResponse.getDefaultInstance(); } + private void throwOnOldMaster(long procId, long initiatingMasterActiveTime) + throws MasterNotRunningException { + if (initiatingMasterActiveTime > server.getMasterActiveTime()) { + // procedure is initiated by new active master but report received on master with older active + // time + LOG.warn( + "Report for procId: {} and initiatingMasterAT {} received on master with activeTime {}", + procId, initiatingMasterActiveTime, server.getMasterActiveTime()); + throw new MasterNotRunningException("Another master is active"); + } + } + @Override public FileArchiveNotificationResponse reportFileArchival(RpcController controller, FileArchiveNotificationRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 3aa5c2df751b..e9c98d624460 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -267,6 +267,9 @@ long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long /** Returns true if master is the active one */ boolean isActiveMaster(); + /** Returns timestamp in millis when this master became the active one. 
*/ + long getMasterActiveTime(); + /** Returns true if master is initialized */ boolean isInitialized(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java index f51af7ac0d58..9a38952913d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java @@ -64,8 +64,9 @@ public TableOperationType getTableOperationType() { } @Override - public RemoteOperation newRemoteOperation() { - return new RegionCloseOperation(this, region, getProcId(), assignCandidate, evictCache); + public RemoteOperation newRemoteOperation(MasterProcedureEnv env) { + return new RegionCloseOperation(this, region, getProcId(), assignCandidate, evictCache, + env.getMasterServices().getMasterActiveTime()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.java index 6116c40d7c85..21218a1177a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.java @@ -57,8 +57,9 @@ public TableOperationType getTableOperationType() { } @Override - public RemoteOperation newRemoteOperation() { - return new RegionOpenOperation(this, region, getProcId()); + public RemoteOperation newRemoteOperation(MasterProcedureEnv env) { + return new RegionOpenOperation(this, region, getProcId(), + env.getMasterServices().getMasterActiveTime()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 41466fce9549..c14af9e9cc7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -96,10 +96,11 @@ public Optional remoteCallBuild(Maste if (state == RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_REPORT_SUCCEED) { return Optional.empty(); } - return Optional.of(newRemoteOperation()); + return Optional.of(newRemoteOperation(env)); } - protected abstract RemoteProcedureDispatcher.RemoteOperation newRemoteOperation(); + protected abstract RemoteProcedureDispatcher.RemoteOperation + newRemoteOperation(MasterProcedureEnv env); @Override public void remoteOperationCompleted(MasterProcedureEnv env) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java index 88f7e652cbff..7c67f0e3ee90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java @@ -222,8 +222,9 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN } } } - return Optional.of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), - FlushRegionCallable.class, builder.build().toByteArray())); + return Optional + .of(new 
RSProcedureDispatcher.ServerOperation(this, getProcId(), FlushRegionCallable.class, + builder.build().toByteArray(), env.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index dfd8c9587b27..ef025757c58e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -422,7 +422,7 @@ public void dispatchCloseRequests(final MasterProcedureEnv env, @Override public void dispatchServerOperations(MasterProcedureEnv env, List operations) { - operations.stream().map(o -> o.buildRequest()).forEachOrdered(request::addProc); + operations.stream().map(ServerOperation::buildRequest).forEachOrdered(request::addProc); } // will be overridden in test. @@ -441,7 +441,9 @@ protected final void remoteCallFailed(final MasterProcedureEnv env, final IOExce private static OpenRegionRequest buildOpenRegionRequest(final MasterProcedureEnv env, final ServerName serverName, final List operations) { final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); - builder.setServerStartCode(serverName.getStartcode()); + builder.setServerStartCode(serverName.getStartCode()); + operations.stream().map(RemoteOperation::getInitiatingMasterActiveTime).findAny() + .ifPresent(builder::setInitiatingMasterActiveTime); builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime()); for (RegionOpenOperation op : operations) { builder.addOpenInfo(op.buildRegionOpenInfoRequest(env)); @@ -464,8 +466,8 @@ public static final class ServerOperation extends RemoteOperation { private final byte[] rsProcData; public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class rsProcClass, - byte[] rsProcData) { - super(remoteProcedure); + byte[] rsProcData, long initiatingMasterActiveTime) { + super(remoteProcedure, initiatingMasterActiveTime); this.procId = procId; this.rsProcClass = rsProcClass; this.rsProcData = rsProcData; @@ -473,7 +475,8 @@ public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class rs public RemoteProcedureRequest buildRequest() { return RemoteProcedureRequest.newBuilder().setProcId(procId) - .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData)).build(); + .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData)) + .setInitiatingMasterActiveTime(getInitiatingMasterActiveTime()).build(); } } @@ -481,8 +484,9 @@ public static abstract class RegionOperation extends RemoteOperation { protected final RegionInfo regionInfo; protected final long procId; - protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId) { - super(remoteProcedure); + protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId, + long initiatingMasterActiveTime) { + super(remoteProcedure, initiatingMasterActiveTime); this.regionInfo = regionInfo; this.procId = procId; } @@ -490,9 +494,9 @@ protected RegionOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo public static class RegionOpenOperation extends RegionOperation { - public RegionOpenOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, - long procId) { - super(remoteProcedure, regionInfo, procId); + public RegionOpenOperation(RemoteProcedure 
remoteProcedure, RegionInfo regionInfo, long procId, + long initiatingMasterActiveTime) { + super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime); } public OpenRegionRequest.RegionOpenInfo @@ -507,8 +511,8 @@ public static class RegionCloseOperation extends RegionOperation { private boolean evictCache; public RegionCloseOperation(RemoteProcedure remoteProcedure, RegionInfo regionInfo, long procId, - ServerName destinationServer, boolean evictCache) { - super(remoteProcedure, regionInfo, procId); + ServerName destinationServer, boolean evictCache, long initiatingMasterActiveTime) { + super(remoteProcedure, regionInfo, procId, initiatingMasterActiveTime); this.destinationServer = destinationServer; this.evictCache = evictCache; } @@ -519,8 +523,7 @@ public ServerName getDestinationServer() { public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) { return ProtobufUtil.buildCloseRegionRequest(serverName, regionInfo.getRegionName(), - getDestinationServer(), procId, evictCache); - + getDestinationServer(), procId, evictCache, getInitiatingMasterActiveTime()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java index e465bff9745f..05621767e7f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java @@ -95,9 +95,11 @@ protected boolean holdLock(MasterProcedureEnv env) { @Override public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { - return Optional.of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), - SnapshotRegionCallable.class, MasterProcedureProtos.SnapshotRegionParameter.newBuilder() - .setRegion(ProtobufUtil.toRegionInfo(region)).setSnapshot(snapshot).build().toByteArray())); + return Optional + .of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), SnapshotRegionCallable.class, + MasterProcedureProtos.SnapshotRegionParameter.newBuilder() + .setRegion(ProtobufUtil.toRegionInfo(region)).setSnapshot(snapshot).build().toByteArray(), + env.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java index 5d3b25f7b14f..a3e126484c34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java @@ -224,8 +224,9 @@ protected void toStringClassDetails(StringBuilder builder) { public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { SnapshotVerifyParameter.Builder builder = SnapshotVerifyParameter.newBuilder(); builder.setSnapshot(snapshot).setRegion(ProtobufUtil.toRegionInfo(region)); - return Optional.of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), - SnapshotVerifyCallable.class, builder.build().toByteArray())); + return Optional + .of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), SnapshotVerifyCallable.class, + builder.build().toByteArray(), env.getMasterServices().getMasterActiveTime())); } @Override diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java index 1e6bb78e250c..d1a49ebe6ec5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.java @@ -97,9 +97,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws @Override public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { - return Optional.of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), - SplitWALCallable.class, MasterProcedureProtos.SplitWALParameter.newBuilder() - .setWalPath(walPath).build().toByteArray())); + return Optional.of(new RSProcedureDispatcher.ServerOperation( + this, getProcId(), SplitWALCallable.class, MasterProcedureProtos.SplitWALParameter + .newBuilder().setWalPath(walPath).build().toByteArray(), + env.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java index 668897cd9a4b..69fa15e858ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleRemoteProcedure.java @@ -93,7 +93,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws SwitchRpcThrottleRemoteCallable.class, SwitchRpcThrottleRemoteStateData.newBuilder() .setTargetServer(ProtobufUtil.toServerName(remote)) - .setRpcThrottleEnabled(rpcThrottleEnabled).build().toByteArray())); + .setRpcThrottleEnabled(rpcThrottleEnabled).build().toByteArray(), + masterProcedureEnv.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java index e6cf46216759..9a5082ee82de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ClaimReplicationQueueRemoteProcedure.java @@ -97,7 +97,7 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN queueId.getSourceServerName() .ifPresent(sourceServer -> builder.setSourceServer(ProtobufUtil.toServerName(sourceServer))); return Optional.of(new ServerOperation(this, getProcId(), ClaimReplicationQueueCallable.class, - builder.build().toByteArray())); + builder.build().toByteArray(), env.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java index ef997fba4172..cf3bc5704759 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java @@ -119,7 +119,8 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN assert 
targetServer.equals(remote); return Optional.of(new ServerOperation(this, getProcId(), RefreshPeerCallable.class, RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type)) - .setTargetServer(ProtobufUtil.toServerName(remote)).setStage(stage).build().toByteArray())); + .setTargetServer(ProtobufUtil.toServerName(remote)).setStage(stage).build().toByteArray(), + env.getMasterServices().getMasterActiveTime())); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java index 40a00fdd4daf..ea0636290dd6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.java @@ -69,8 +69,9 @@ public Optional remoteCallBuild(MasterProcedureEnv env, ServerN ReplaySyncReplicationWALParameter.newBuilder(); builder.setPeerId(peerId); wals.stream().forEach(builder::addWal); - return Optional.of(new ServerOperation(this, getProcId(), - ReplaySyncReplicationWALCallable.class, builder.build().toByteArray())); + return Optional + .of(new ServerOperation(this, getProcId(), ReplaySyncReplicationWALCallable.class, + builder.build().toByteArray(), env.getMasterServices().getMasterActiveTime())); } protected boolean complete(MasterProcedureEnv env, Throwable error) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 191d1ebc5244..dedd0669113a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -2231,6 +2231,7 @@ public void postOpenDeployTasks(final PostOpenDeployContext context) throws IOEx HRegion r = context.getRegion(); long openProcId = context.getOpenProcId(); long masterSystemTime = context.getMasterSystemTime(); + long initiatingMasterActiveTime = context.getInitiatingMasterActiveTime(); rpcServices.checkOpen(); LOG.info("Post open deploy tasks for {}, pid={}, masterSystemTime={}", r.getRegionInfo().getRegionNameAsString(), openProcId, masterSystemTime); @@ -2254,7 +2255,7 @@ public void postOpenDeployTasks(final PostOpenDeployContext context) throws IOEx // Notify master if ( !reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.OPENED, - openSeqNum, openProcId, masterSystemTime, r.getRegionInfo())) + openSeqNum, openProcId, masterSystemTime, r.getRegionInfo(), initiatingMasterActiveTime)) ) { throw new IOException( "Failed to report opened region to master: " + r.getRegionInfo().getRegionNameAsString()); @@ -2315,6 +2316,7 @@ private boolean skipReportingTransition(final RegionStateTransitionContext conte for (long procId : procIds) { transition.addProcId(procId); } + transition.setInitiatingMasterActiveTime(context.getInitiatingMasterActiveTime()); return builder.build(); } @@ -3533,12 +3535,15 @@ public boolean reportFileArchivalForQuotas(TableName tableName, return true; } - void executeProcedure(long procId, RSProcedureCallable callable) { - executorService.submit(new RSProcedureHandler(this, procId, callable)); + void executeProcedure(long procId, long initiatingMasterActiveTime, + RSProcedureCallable 
callable) { + executorService + .submit(new RSProcedureHandler(this, procId, initiatingMasterActiveTime, callable)); } - public void remoteProcedureComplete(long procId, Throwable error) { - procedureResultReporter.complete(procId, error); + public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, + Throwable error) { + procedureResultReporter.complete(procId, initiatingMasterActiveTime, error); } void reportProcedureDone(ReportProcedureDoneRequest request) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 8082a2db69c8..e22b3f900178 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3859,6 +3859,8 @@ public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController control private void executeOpenRegionProcedures(OpenRegionRequest request, Map tdCache) { long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1; + long initiatingMasterActiveTime = + request.hasInitiatingMasterActiveTime() ? request.getInitiatingMasterActiveTime() : -1; for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) { RegionInfo regionInfo = ProtobufUtil.toRegionInfo(regionOpenInfo.getRegion()); TableName tableName = regionInfo.getTable(); @@ -3884,14 +3886,16 @@ private void executeOpenRegionProcedures(OpenRegionRequest request, } long procId = regionOpenInfo.getOpenProcId(); if (server.submitRegionProcedure(procId)) { - server.getExecutorService().submit( - AssignRegionHandler.create(server, regionInfo, procId, tableDesc, masterSystemTime)); + server.getExecutorService().submit(AssignRegionHandler.create(server, regionInfo, procId, + tableDesc, masterSystemTime, initiatingMasterActiveTime)); } } } private void executeCloseRegionProcedures(CloseRegionRequest request) { String encodedName; + long initiatingMasterActiveTime = + request.hasInitiatingMasterActiveTime() ? 
request.getInitiatingMasterActiveTime() : -1; try { encodedName = ProtobufUtil.getRegionEncodedName(request.getRegion()); } catch (DoNotRetryIOException e) { @@ -3903,8 +3907,8 @@ private void executeCloseRegionProcedures(CloseRegionRequest request) { long procId = request.getCloseProcId(); boolean evictCache = request.getEvictCache(); if (server.submitRegionProcedure(procId)) { - server.getExecutorService().submit( - UnassignRegionHandler.create(server, encodedName, procId, false, destination, evictCache)); + server.getExecutorService().submit(UnassignRegionHandler.create(server, encodedName, procId, + false, destination, evictCache, initiatingMasterActiveTime)); } } @@ -3916,12 +3920,13 @@ private void executeProcedures(RemoteProcedureRequest request) { } catch (Exception e) { LOG.warn("Failed to instantiating remote procedure {}, pid={}", request.getProcClass(), request.getProcId(), e); - server.remoteProcedureComplete(request.getProcId(), e); + server.remoteProcedureComplete(request.getProcId(), request.getInitiatingMasterActiveTime(), + e); return; } callable.init(request.getProcData().toByteArray(), server); LOG.debug("Executing remote procedure {}, pid={}", callable.getClass(), request.getProcId()); - server.executeProcedure(request.getProcId(), callable); + server.executeProcedure(request.getProcId(), request.getInitiatingMasterActiveTime(), callable); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 9c849ebf9577..a46e2dae695c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -93,11 +93,14 @@ class PostOpenDeployContext { private final HRegion region; private final long openProcId; private final long masterSystemTime; + private final long initiatingMasterActiveTime; - public PostOpenDeployContext(HRegion region, long openProcId, long masterSystemTime) { + public PostOpenDeployContext(HRegion region, long openProcId, long masterSystemTime, + long initiatingMasterActiveTime) { this.region = region; this.openProcId = openProcId; this.masterSystemTime = masterSystemTime; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; } public HRegion getRegion() { @@ -111,6 +114,10 @@ public long getOpenProcId() { public long getMasterSystemTime() { return masterSystemTime; } + + public long getInitiatingMasterActiveTime() { + return initiatingMasterActiveTime; + } } /** @@ -123,23 +130,26 @@ class RegionStateTransitionContext { private final TransitionCode code; private final long openSeqNum; private final long masterSystemTime; + private final long initiatingMasterActiveTime; private final long[] procIds; private final RegionInfo[] hris; public RegionStateTransitionContext(TransitionCode code, long openSeqNum, long masterSystemTime, - RegionInfo... hris) { + long initiatingMasterActiveTime, RegionInfo... 
hris) { this.code = code; this.openSeqNum = openSeqNum; this.masterSystemTime = masterSystemTime; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; this.hris = hris; this.procIds = new long[hris.length]; } public RegionStateTransitionContext(TransitionCode code, long openSeqNum, long procId, - long masterSystemTime, RegionInfo hri) { + long masterSystemTime, RegionInfo hri, long initiatingMasterActiveTime) { this.code = code; this.openSeqNum = openSeqNum; this.masterSystemTime = masterSystemTime; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; this.hris = new RegionInfo[] { hri }; this.procIds = new long[] { procId }; } @@ -163,6 +173,10 @@ public RegionInfo[] getHris() { public long[] getProcIds() { return procIds; } + + public long getInitiatingMasterActiveTime() { + return initiatingMasterActiveTime; + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 817ecd42ce0b..21016fe59dd0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -51,8 +51,9 @@ public RemoteProcedureResultReporter(HRegionServer server) { this.server = server; } - public void complete(long procId, Throwable error) { - RemoteProcedureResult.Builder builder = RemoteProcedureResult.newBuilder().setProcId(procId); + public void complete(long procId, long initiatingMasterActiveTime, Throwable error) { + RemoteProcedureResult.Builder builder = RemoteProcedureResult.newBuilder().setProcId(procId) + .setInitiatingMasterActiveTime(initiatingMasterActiveTime); if (error != null) { LOG.debug("Failed to complete execution of pid={}", procId, error); builder.setStatus(RemoteProcedureResult.Status.ERROR).setError( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 20a7e0c9af2f..d979a3ac82e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -81,7 +81,7 @@ private void requestRegionSplit() { // are created just to pass the information to the reportRegionStateTransition(). 
if ( !server.reportRegionStateTransition(new RegionStateTransitionContext( - TransitionCode.READY_TO_SPLIT, HConstants.NO_SEQNUM, -1, parent, hri_a, hri_b)) + TransitionCode.READY_TO_SPLIT, HConstants.NO_SEQNUM, -1, -1, parent, hri_a, hri_b)) ) { LOG.error("Unable to ask master to split " + parent.getRegionNameAsString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index ee4d3144f72a..9412edce9097 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -59,15 +59,20 @@ public class AssignRegionHandler extends EventHandler { private final long masterSystemTime; + // active time of the master that sent this assign request, used for fencing + private final long initiatingMasterActiveTime; + private final RetryCounter retryCounter; public AssignRegionHandler(HRegionServer server, RegionInfo regionInfo, long openProcId, - @Nullable TableDescriptor tableDesc, long masterSystemTime, EventType eventType) { + @Nullable TableDescriptor tableDesc, long masterSystemTime, long initiatingMasterActiveTime, + EventType eventType) { super(server, eventType); this.regionInfo = regionInfo; this.openProcId = openProcId; this.tableDesc = tableDesc; this.masterSystemTime = masterSystemTime; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; this.retryCounter = HandlerUtil.getRetryCounter(); } @@ -82,7 +87,7 @@ private void cleanUpAndReportFailure(IOException error) throws IOException { rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); if ( !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, - HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo)) + HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo, initiatingMasterActiveTime)) ) { throw new IOException( "Failed to report failed open to master: " + regionInfo.getRegionNameAsString()); @@ -142,7 +147,8 @@ public void process() throws IOException { } // From here on out, this is PONR. We can not revert back. The only way to address an // exception from here on out is to abort the region server. - rs.postOpenDeployTasks(new PostOpenDeployContext(region, openProcId, masterSystemTime)); + rs.postOpenDeployTasks( + new PostOpenDeployContext(region, openProcId, masterSystemTime, initiatingMasterActiveTime)); rs.addRegion(region); LOG.info("Opened {}", regionName); // Cache the open region procedure id after report region transition succeed. 
@@ -169,7 +175,8 @@ protected void handleException(Throwable t) { } public static AssignRegionHandler create(HRegionServer server, RegionInfo regionInfo, - long openProcId, TableDescriptor tableDesc, long masterSystemTime) { + long openProcId, TableDescriptor tableDesc, long masterSystemTime, + long initiatingMasterActiveTime) { EventType eventType; if (regionInfo.isMetaRegion()) { eventType = EventType.M_RS_OPEN_META; @@ -182,6 +189,6 @@ public static AssignRegionHandler create(HRegionServer server, RegionInfo region eventType = EventType.M_RS_OPEN_REGION; } return new AssignRegionHandler(server, regionInfo, openProcId, tableDesc, masterSystemTime, - eventType); + initiatingMasterActiveTime, eventType); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index e184cb42fb91..f18e7d9ba635 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -111,7 +111,7 @@ public void process() throws IOException { this.rsServices.removeRegion(region, destination); rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, - HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); + HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo, -1)); // Done! Region is closed on this RS LOG.debug("Closed {}", region.getRegionInfo().getRegionNameAsString()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 898121602a4e..0430b442410c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -166,8 +166,9 @@ private void doCleanUpOnFailedOpen(HRegion region) throws IOException { cleanupFailedOpen(region); } } finally { - rsServices.reportRegionStateTransition(new RegionStateTransitionContext( - TransitionCode.FAILED_OPEN, HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); + rsServices + .reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, + HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo, -1)); } } @@ -253,7 +254,7 @@ static class PostOpenDeployTasksThread extends Thread { public void run() { try { this.services.postOpenDeployTasks( - new PostOpenDeployContext(region, Procedure.NO_PROC_ID, masterSystemTime)); + new PostOpenDeployContext(region, Procedure.NO_PROC_ID, masterSystemTime, -1)); } catch (Throwable e) { String msg = "Exception running postOpenDeployTasks; region=" + this.region.getRegionInfo().getEncodedName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java index d3ecc8a51e22..6eacc6b78e6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java @@ -35,12 +35,17 @@ public class RSProcedureHandler extends EventHandler { private final long procId; + // active time of the 
master that sent procedure request, used for fencing + private final long initiatingMasterActiveTime; + private final RSProcedureCallable callable; - public RSProcedureHandler(HRegionServer rs, long procId, RSProcedureCallable callable) { + public RSProcedureHandler(HRegionServer rs, long procId, long initiatingMasterActiveTime, + RSProcedureCallable callable) { super(rs, callable.getEventType()); this.procId = procId; this.callable = callable; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; } @Override @@ -53,7 +58,7 @@ public void process() { LOG.error("pid=" + this.procId, t); error = t; } finally { - ((HRegionServer) server).remoteProcedureComplete(procId, error); + ((HRegionServer) server).remoteProcedureComplete(procId, initiatingMasterActiveTime, error); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index a360759aea15..8459b0bfb806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -63,13 +63,12 @@ public class UnassignRegionHandler extends EventHandler { private boolean evictCache; - public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId, - boolean abort, @Nullable ServerName destination, EventType eventType) { - this(server, encodedName, closeProcId, abort, destination, eventType, false); - } + // active time of the master that sent this unassign request, used for fencing + private final long initiatingMasterActiveTime; public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId, - boolean abort, @Nullable ServerName destination, EventType eventType, boolean evictCache) { + boolean abort, @Nullable ServerName destination, EventType eventType, + long initiatingMasterActiveTime, boolean evictCache) { super(server, eventType); this.encodedName = encodedName; this.closeProcId = closeProcId; @@ -77,6 +76,7 @@ public UnassignRegionHandler(HRegionServer server, String encodedName, long clos this.destination = destination; this.retryCounter = HandlerUtil.getRetryCounter(); this.evictCache = evictCache; + this.initiatingMasterActiveTime = initiatingMasterActiveTime; } private HRegionServer getServer() { @@ -138,7 +138,7 @@ public void process() throws IOException { rs.removeRegion(region, destination); if ( !rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, - HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo())) + HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo(), initiatingMasterActiveTime)) ) { throw new IOException("Failed to report close to master: " + regionName); } @@ -158,7 +158,8 @@ protected void handleException(Throwable t) { } public static UnassignRegionHandler create(HRegionServer server, String encodedName, - long closeProcId, boolean abort, @Nullable ServerName destination, boolean evictCache) { + long closeProcId, boolean abort, @Nullable ServerName destination, boolean evictCache, + long initiatingMasterActiveTime) { // Just try our best to determine whether it is for closing meta. It is not the end of the world // if we put the handler into a wrong executor. 
Region region = server.getRegion(encodedName); @@ -166,6 +167,6 @@ public static UnassignRegionHandler create(HRegionServer server, String encodedN ? EventType.M_RS_CLOSE_META : EventType.M_RS_CLOSE_REGION; return new UnassignRegionHandler(server, encodedName, closeProcId, abort, destination, - eventType, evictCache); + eventType, initiatingMasterActiveTime, evictCache); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 3d4d63722e09..e78ca7d0cdb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hbase.thirdparty.com.google.protobuf.Service; @@ -69,10 +70,12 @@ public class MockNoopMasterServices implements MasterServices { private final Configuration conf; private final MetricsMaster metricsMaster; + private final long masterActiveTime; public MockNoopMasterServices(final Configuration conf) { this.conf = conf; this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(mock(HMaster.class))); + this.masterActiveTime = EnvironmentEdgeManager.currentTime(); } @Override @@ -327,6 +330,11 @@ public boolean isActiveMaster() { return true; } + @Override + public long getMasterActiveTime() { + return masterActiveTime; + } + @Override public boolean isInitialized() { return false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java index d3fca2f59895..1500a3c00cd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java @@ -183,8 +183,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws @Override public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { - return Optional - .of(new RSProcedureDispatcher.ServerOperation(null, 0L, this.getClass(), new byte[0])); + return Optional.of(new RSProcedureDispatcher.ServerOperation(null, 0L, this.getClass(), + new byte[0], env.getMasterServices().getMasterActiveTime())); } @Override From 27684d0bc8bc50228783f45f6c1444ef8fd4c0f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andor=20Moln=C3=A1r?= Date: Wed, 28 Aug 2024 10:13:22 -0500 Subject: [PATCH 500/514] HBASE-28777 mTLS client hostname verification doesn't work with OptionalSslHandler (#6149) Signed-off-by: Balazs Meszaros --- .../io/crypto/tls/HBaseTrustManager.java | 7 +- .../io/crypto/tls/TestHBaseTrustManager.java | 41 ++++ .../hbase/io/crypto/tls/X509TestContext.java | 9 +- .../hadoop/hbase/ipc/NettyRpcServer.java | 36 ++-- .../ipc/OptionalSslHandlerWithHostPort.java | 40 ++++ .../TestMutualTlsClientSideNonLocalhost.java | 178 ++++++++++++++++++ 6 files changed, 289 insertions(+), 22 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/OptionalSslHandlerWithHostPort.java create 
mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMutualTlsClientSideNonLocalhost.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/HBaseTrustManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/HBaseTrustManager.java index ca4756a6131c..15beb5f60bc9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/HBaseTrustManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/HBaseTrustManager.java @@ -92,8 +92,13 @@ public void checkServerTrusted(X509Certificate[] chain, String authType, Socket public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException { x509ExtendedTrustManager.checkClientTrusted(chain, authType, engine); - if (hostnameVerificationEnabled) { + if (hostnameVerificationEnabled && engine != null) { try { + if (engine.getPeerHost() == null) { + LOG.warn( + "Cannot perform client hostname verification, because peer information is not available"); + return; + } performHostVerification(InetAddress.getByName(engine.getPeerHost()), chain[0]); } catch (UnknownHostException e) { throw new CertificateException("Failed to verify host", e); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestHBaseTrustManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestHBaseTrustManager.java index 07fc87e01354..cfe197ae8d37 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestHBaseTrustManager.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestHBaseTrustManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.crypto.tls; import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -36,6 +37,7 @@ import java.util.Date; import java.util.List; import java.util.Random; +import javax.net.ssl.SSLEngine; import javax.net.ssl.X509ExtendedTrustManager; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -86,6 +88,8 @@ public class TestHBaseTrustManager { private InetAddress mockInetAddressWithHostname; private Socket mockSocketWithoutHostname; private Socket mockSocketWithHostname; + private SSLEngine mockSSLEngineWithoutPeerhost; + private SSLEngine mockSSLEngineWithPeerhost; @BeforeClass public static void createKeyPair() throws Exception { @@ -126,6 +130,12 @@ public void setup() throws Exception { mockSocketWithHostname = mock(Socket.class); when(mockSocketWithHostname.getInetAddress()) .thenAnswer((Answer) invocationOnMock -> mockInetAddressWithHostname); + + mockSSLEngineWithoutPeerhost = mock(SSLEngine.class); + doReturn(null).when(mockSSLEngineWithoutPeerhost).getPeerHost(); + + mockSSLEngineWithPeerhost = mock(SSLEngine.class); + doReturn(IP_ADDRESS).when(mockSSLEngineWithPeerhost).getPeerHost(); } @SuppressWarnings("JavaUtilDate") @@ -352,4 +362,35 @@ public void testClientTrustedWithHostnameVerificationEnabledWithHostnameNoRevers mockSocketWithHostname); } + @Test + public void testClientTrustedSslEngineWithPeerHostReverseLookup() throws Exception { + HBaseTrustManager trustManager = + new HBaseTrustManager(mockX509ExtendedTrustManager, true, true); + X509Certificate[] certificateChain = createSelfSignedCertificateChain(null, HOSTNAME); + 
trustManager.checkClientTrusted(certificateChain, null, mockSSLEngineWithPeerhost); + } + + @Test(expected = CertificateException.class) + public void testClientTrustedSslEngineWithPeerHostNoReverseLookup() throws Exception { + HBaseTrustManager trustManager = + new HBaseTrustManager(mockX509ExtendedTrustManager, true, false); + X509Certificate[] certificateChain = createSelfSignedCertificateChain(null, HOSTNAME); + trustManager.checkClientTrusted(certificateChain, null, mockSSLEngineWithPeerhost); + } + + @Test + public void testClientTrustedSslEngineWithoutPeerHost() throws Exception { + HBaseTrustManager trustManager = + new HBaseTrustManager(mockX509ExtendedTrustManager, true, false); + X509Certificate[] certificateChain = createSelfSignedCertificateChain(null, HOSTNAME); + trustManager.checkClientTrusted(certificateChain, null, mockSSLEngineWithoutPeerhost); + } + + @Test + public void testClientTrustedSslEngineNotAvailable() throws Exception { + HBaseTrustManager trustManager = + new HBaseTrustManager(mockX509ExtendedTrustManager, true, false); + X509Certificate[] certificateChain = createSelfSignedCertificateChain(null, HOSTNAME); + trustManager.checkClientTrusted(certificateChain, null, (SSLEngine) null); + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java index ad4ffe0ab5a4..86f9818f523d 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java @@ -461,12 +461,13 @@ public X509TestContext cloneWithNewKeystoreCert(X509Certificate cert) { } public void regenerateStores(X509KeyType keyStoreKeyType, X509KeyType trustStoreKeyType, - KeyStoreFileType keyStoreFileType, KeyStoreFileType trustStoreFileType) + KeyStoreFileType keyStoreFileType, KeyStoreFileType trustStoreFileType, + String... subjectAltNames) throws GeneralSecurityException, IOException, OperatorCreationException { trustStoreKeyPair = X509TestHelpers.generateKeyPair(trustStoreKeyType); keyStoreKeyPair = X509TestHelpers.generateKeyPair(keyStoreKeyType); - createCertificates(); + createCertificates(subjectAltNames); switch (keyStoreFileType) { case JKS: @@ -499,7 +500,7 @@ public void regenerateStores(X509KeyType keyStoreKeyType, X509KeyType trustStore } } - private void createCertificates() + private void createCertificates(String... 
subjectAltNames) throws GeneralSecurityException, IOException, OperatorCreationException { X500NameBuilder caNameBuilder = new X500NameBuilder(BCStyle.INSTANCE); caNameBuilder.addRDN(BCStyle.CN, @@ -510,7 +511,7 @@ private void createCertificates() X500NameBuilder nameBuilder = new X500NameBuilder(BCStyle.INSTANCE); nameBuilder.addRDN(BCStyle.CN, MethodHandles.lookup().lookupClass().getCanonicalName() + " Zookeeper Test"); - keyStoreCertificate = newCert(nameBuilder.build()); + keyStoreCertificate = newCert(nameBuilder.build(), subjectAltNames); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index 1d93fbd0f668..b21b6e19c78e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_ENABLED; import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT; import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.HBASE_SERVER_NETTY_TLS_WRAP_SIZE; -import static org.apache.hadoop.hbase.io.crypto.tls.X509Util.TLS_CONFIG_REVERSE_DNS_LOOKUP_ENABLED; import java.io.IOException; import java.io.InterruptedIOException; @@ -386,8 +385,24 @@ private void initSSL(ChannelPipeline p, NettyServerRpcConnection conn, boolean s throws X509Exception, IOException { SslContext nettySslContext = getSslContext(); + /* + * our HostnameVerifier gets the host name from SSLEngine, so we have to construct the engine + * properly by passing the remote address + */ + if (supportPlaintext) { - p.addLast("ssl", new OptionalSslHandler(nettySslContext)); + SocketAddress remoteAddress = p.channel().remoteAddress(); + OptionalSslHandler optionalSslHandler; + + if (remoteAddress instanceof InetSocketAddress) { + InetSocketAddress remoteInetAddress = (InetSocketAddress) remoteAddress; + optionalSslHandler = new OptionalSslHandlerWithHostPort(nettySslContext, + remoteInetAddress.getHostString(), remoteInetAddress.getPort()); + } else { + optionalSslHandler = new OptionalSslHandler(nettySslContext); + } + + p.addLast("ssl", optionalSslHandler); LOG.debug("Dual mode SSL handler added for channel: {}", p.channel()); } else { SocketAddress remoteAddress = p.channel().remoteAddress(); @@ -395,21 +410,8 @@ private void initSSL(ChannelPipeline p, NettyServerRpcConnection conn, boolean s if (remoteAddress instanceof InetSocketAddress) { InetSocketAddress remoteInetAddress = (InetSocketAddress) remoteAddress; - String host; - - if (conf.getBoolean(TLS_CONFIG_REVERSE_DNS_LOOKUP_ENABLED, true)) { - host = remoteInetAddress.getHostName(); - } else { - host = remoteInetAddress.getHostString(); - } - - int port = remoteInetAddress.getPort(); - - /* - * our HostnameVerifier gets the host name from SSLEngine, so we have to construct the - * engine properly by passing the remote address - */ - sslHandler = nettySslContext.newHandler(p.channel().alloc(), host, port); + sslHandler = nettySslContext.newHandler(p.channel().alloc(), + remoteInetAddress.getHostString(), remoteInetAddress.getPort()); } else { sslHandler = nettySslContext.newHandler(p.channel().alloc()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/OptionalSslHandlerWithHostPort.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/OptionalSslHandlerWithHostPort.java new file mode 
100644 index 000000000000..d349f1c6c783 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/OptionalSslHandlerWithHostPort.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.OptionalSslHandler; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslContext; +import org.apache.hbase.thirdparty.io.netty.handler.ssl.SslHandler; + +class OptionalSslHandlerWithHostPort extends OptionalSslHandler { + + private final String host; + private final int port; + + public OptionalSslHandlerWithHostPort(SslContext sslContext, String host, int port) { + super(sslContext); + this.host = host; + this.port = port; + } + + @Override + protected SslHandler newSslHandler(ChannelHandlerContext context, SslContext sslContext) { + return sslContext.newHandler(context.alloc(), host, port); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMutualTlsClientSideNonLocalhost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMutualTlsClientSideNonLocalhost.java new file mode 100644 index 000000000000..f22d3f086470 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestMutualTlsClientSideNonLocalhost.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.security; + +import static org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl.SERVICE; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.security.GeneralSecurityException; +import java.security.Security; +import java.util.List; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.io.crypto.tls.KeyStoreFileType; +import org.apache.hadoop.hbase.io.crypto.tls.X509KeyType; +import org.apache.hadoop.hbase.io.crypto.tls.X509TestContext; +import org.apache.hadoop.hbase.io.crypto.tls.X509TestContextProvider; +import org.apache.hadoop.hbase.io.crypto.tls.X509Util; +import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; +import org.apache.hadoop.hbase.ipc.NettyRpcClient; +import org.apache.hadoop.hbase.ipc.NettyRpcServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.operator.OperatorCreationException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos; + +/** + * Tests for client-side mTLS focusing on client hostname verification in the case when client and + * server are on different hosts. We try to simulate this behaviour by querying the hostname with + *
    + * InetAddress.getLocalHost() + *
    + * Certificates are generated with the hostname in Subject Alternative Names, server binds + * non-localhost interface and client connects via remote IP address. Parameter is set to verify + * both TLS/plaintext and TLS-only cases. + */ +@RunWith(Parameterized.class) +@Category({ RPCTests.class, MediumTests.class }) +public class TestMutualTlsClientSideNonLocalhost { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMutualTlsClientSideNonLocalhost.class); + + protected static HBaseCommonTestingUtil UTIL; + + protected static File DIR; + + protected static X509TestContextProvider PROVIDER; + + private X509TestContext x509TestContext; + + protected RpcServer rpcServer; + + protected RpcClient rpcClient; + private TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub; + + @Parameterized.Parameter(0) + public boolean supportPlaintext; + + @Parameterized.Parameters(name = "{index}: supportPlaintext={0}") + public static List data() { + return List.of(true, false); + } + + @BeforeClass + public static void setUpBeforeClass() throws IOException { + UTIL = new HBaseCommonTestingUtil(); + Security.addProvider(new BouncyCastleProvider()); + DIR = + new File(UTIL.getDataTestDir(AbstractTestTlsRejectPlainText.class.getSimpleName()).toString()) + .getCanonicalFile(); + FileUtils.forceMkdir(DIR); + Configuration conf = UTIL.getConfiguration(); + conf.setClass(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, NettyRpcClient.class, + RpcClient.class); + conf.setClass(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, NettyRpcServer.class, + RpcServer.class); + conf.setBoolean(X509Util.HBASE_SERVER_NETTY_TLS_ENABLED, true); + conf.setBoolean(X509Util.HBASE_CLIENT_NETTY_TLS_ENABLED, true); + PROVIDER = new X509TestContextProvider(conf, DIR); + } + + @AfterClass + public static void cleanUp() { + Security.removeProvider(BouncyCastleProvider.PROVIDER_NAME); + UTIL.cleanupTestDir(); + } + + @Before + public void setUp() throws Exception { + x509TestContext = PROVIDER.get(X509KeyType.RSA, X509KeyType.RSA, "keyPassword".toCharArray()); + x509TestContext.setConfigurations(KeyStoreFileType.JKS, KeyStoreFileType.JKS); + + Configuration serverConf = new Configuration(UTIL.getConfiguration()); + Configuration clientConf = new Configuration(UTIL.getConfiguration()); + + initialize(serverConf, clientConf); + + InetSocketAddress isa = new InetSocketAddress(InetAddress.getLocalHost(), 0); + + rpcServer = new NettyRpcServer(null, "testRpcServer", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), isa, serverConf, + new FifoRpcScheduler(serverConf, 1), true); + rpcServer.start(); + + rpcClient = new NettyRpcClient(clientConf); + stub = TestProtobufRpcServiceImpl.newBlockingStub(rpcClient, rpcServer.getListenerAddress()); + } + + private void initialize(Configuration serverConf, Configuration clientConf) + throws GeneralSecurityException, IOException, OperatorCreationException { + serverConf.setBoolean(X509Util.HBASE_SERVER_NETTY_TLS_SUPPORTPLAINTEXT, supportPlaintext); + clientConf.setBoolean(X509Util.HBASE_CLIENT_NETTY_TLS_VERIFY_SERVER_HOSTNAME, true); + x509TestContext.regenerateStores(X509KeyType.RSA, X509KeyType.RSA, KeyStoreFileType.JKS, + KeyStoreFileType.JKS, InetAddress.getLocalHost().getHostName()); + } + + @After + public void tearDown() throws IOException { + if (rpcServer != null) { + rpcServer.stop(); + } + Closeables.close(rpcClient, true); + x509TestContext.clearConfigurations(); + 
x509TestContext.getConf().unset(X509Util.TLS_CONFIG_OCSP); + x509TestContext.getConf().unset(X509Util.TLS_CONFIG_CLR); + x509TestContext.getConf().unset(X509Util.TLS_CONFIG_PROTOCOL); + System.clearProperty("com.sun.net.ssl.checkRevocation"); + System.clearProperty("com.sun.security.enableCRLDP"); + Security.setProperty("ocsp.enable", Boolean.FALSE.toString()); + Security.setProperty("com.sun.security.enableCRLDP", Boolean.FALSE.toString()); + } + + @Test + public void testClientAuth() throws Exception { + stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage("hello world").build()); + } +} From 2ac657caa5143c4fa36b7322d0636a51b2c235dc Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Fri, 30 Aug 2024 12:10:37 +0530 Subject: [PATCH 501/514] HBASE-27903 Skip submitting Split/Merge procedure when split/merge is disabled at table level (#6169) - Fail fast by adding a check before even submitting a procedure - Update test cases to assert for expected exception post this change - Remove deprecated method mergeRegionsAsync's usage in test - Make use of RegionInfo.getShortNameToLog instead of logging complete region info - Update comments in procedure implementation Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/master/HMaster.java | 23 +++++++++---- .../MergeTableRegionsProcedure.java | 33 ++++++++++--------- .../assignment/SplitTableRegionProcedure.java | 10 +++--- .../client/TestSplitOrMergeAtTableLevel.java | 10 +++--- 4 files changed, 46 insertions(+), 30 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c263a383bafd..e3c56b4df8f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2256,21 +2256,26 @@ public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcib final long nonce) throws IOException { checkInitialized(); + final String regionNamesToLog = RegionInfo.getShortNameToLog(regionsToMerge); + if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { - String regionsStr = Arrays.deepToString(regionsToMerge); - LOG.warn("Merge switch is off! skip merge of " + regionsStr); + LOG.warn("Merge switch is off! skip merge of " + regionNamesToLog); + throw new DoNotRetryIOException( + "Merge of " + regionNamesToLog + " failed because merge switch is off"); + } + + if (!getTableDescriptors().get(regionsToMerge[0].getTable()).isMergeEnabled()) { + LOG.warn("Merge is disabled for the table! 
Skipping merge of {}", regionNamesToLog); throw new DoNotRetryIOException( - "Merge of " + regionsStr + " failed because merge switch is off"); + "Merge of " + regionNamesToLog + " failed as region merge is disabled for the table"); } - final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName) - .collect(Collectors.joining(", ")); return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, ng, nonce) { @Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge); String aid = getClientIdAuditPrefix(); - LOG.info("{} merge regions {}", aid, mergeRegionsStr); + LOG.info("{} merge regions {}", aid, regionNamesToLog); submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), regionsToMerge, forcible)); getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); @@ -2294,6 +2299,12 @@ public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, fina "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off"); } + if (!getTableDescriptors().get(regionInfo.getTable()).isSplitEnabled()) { + LOG.warn("Split is disabled for the table! Skipping split of {}", regionInfo); + throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString() + + " failed as region split is disabled for the table"); + } + return MasterProcedureUtil .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index b9a3ee13361d..201fc7321759 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -443,27 +443,28 @@ protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) { private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException { // Fail if we are taking snapshot for the given table TableName tn = regionsToMerge[0].getTable(); + final String regionNamesToLog = RegionInfo.getShortNameToLog(regionsToMerge); if (env.getMasterServices().getSnapshotManager().isTableTakingAnySnapshot(tn)) { - throw new MergeRegionException("Skip merging regions " - + RegionInfo.getShortNameToLog(regionsToMerge) + ", because we are snapshotting " + tn); + throw new MergeRegionException( + "Skip merging regions " + regionNamesToLog + ", because we are snapshotting " + tn); } - // Mostly this check is not used because we already check the switch before submit a merge - // procedure. Just for safe, check the switch again. This procedure can be rollbacked if - // the switch was set to false after submit. + // Mostly the below two checks are not used because we already check the switches before + // submitting the merge procedure. Just for safety, we are checking the switch again here. + // Also, in case the switch was set to false after submission, this procedure can be rollbacked, + // thanks to this double check! + // case 1: check for cluster level switch if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { - String regionsStr = Arrays.deepToString(this.regionsToMerge); - LOG.warn("Merge switch is off! 
skip merge of " + regionsStr); + LOG.warn("Merge switch is off! skip merge of " + regionNamesToLog); setFailure(getClass().getSimpleName(), - new IOException("Merge of " + regionsStr + " failed because merge switch is off")); + new IOException("Merge of " + regionNamesToLog + " failed because merge switch is off")); return false; } - + // case 2: check for table level switch if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isMergeEnabled()) { - String regionsStr = Arrays.deepToString(regionsToMerge); - LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionsStr); + LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionNamesToLog); setFailure(getClass().getSimpleName(), new IOException( - "Merge of " + regionsStr + " failed as region merge is disabled for the table")); + "Merge of " + regionNamesToLog + " failed as region merge is disabled for the table")); return false; } @@ -471,8 +472,8 @@ private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOExcept RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore(); for (RegionInfo ri : this.regionsToMerge) { if (regionStateStore.hasMergeRegions(ri)) { - String msg = "Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge) - + ", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", has a merge qualifier " + String msg = "Skip merging " + regionNamesToLog + ", because a parent, " + + RegionInfo.getShortNameToLog(ri) + ", has a merge qualifier " + "(if a 'merge column' in parent, it was recently merged but still has outstanding " + "references to its parents that must be cleared before it can participate in merge -- " + "major compact it to hurry clearing of its references)"; @@ -492,8 +493,8 @@ private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOExcept try { if (!isMergeable(env, state)) { setFailure(getClass().getSimpleName(), - new MergeRegionException("Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge) - + ", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", is not mergeable")); + new MergeRegionException("Skip merging " + regionNamesToLog + ", because a parent, " + + RegionInfo.getShortNameToLog(ri) + ", is not mergeable")); return false; } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index afa0f5e42b07..01cd012ddad1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -547,16 +547,18 @@ public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOExcepti return false; } - // Mostly this check is not used because we already check the switch before submit a split - // procedure. Just for safe, check the switch again. This procedure can be rollbacked if - // the switch was set to false after submit. + // Mostly the below two checks are not used because we already check the switches before + // submitting the split procedure. Just for safety, we are checking the switch again here. + // Also, in case the switch was set to false after submission, this procedure can be rollbacked, + // thanks to this double check! 
+ // case 1: check for cluster level switch if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { LOG.warn("pid=" + getProcId() + " split switch is off! skip split of " + parentHRI); setFailure(new IOException( "Split region " + parentHRI.getRegionNameAsString() + " failed due to split switch off")); return false; } - + // case 2: check for table level switch if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) { LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(), parentHRI); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java index d535fc54356d..c5c31c1d6fd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java @@ -25,6 +25,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -125,7 +125,7 @@ public void testTableMergeSwitch() throws Exception { assertFalse(admin.getDescriptor(tableName).isMergeEnabled()); trySplitAndEnsureItIsSuccess(tableName); - Threads.sleep(10000); + tryMergeAndEnsureItFails(tableName); admin.disableTable(tableName); enableTableMerge(tableName); @@ -166,6 +166,7 @@ private void trySplitAndEnsureItFails(final TableName tableName) throws Exceptio // expected to reach here // check and ensure that table does not get splitted assertTrue(admin.getRegions(tableName).size() == originalCount); + assertTrue("Expected DoNotRetryIOException!", ee.getCause() instanceof DoNotRetryIOException); } } @@ -217,7 +218,7 @@ private void tryMergeAndEnsureItFails(final TableName tableName) throws Exceptio byte[] nameOfRegionB = regions.get(1).getEncodedNameAsBytes(); // check and ensure that region do not get merged - Future f = admin.mergeRegionsAsync(nameOfRegionA, nameOfRegionB, true); + Future f = admin.mergeRegionsAsync(new byte[][] { nameOfRegionA, nameOfRegionB }, true); try { f.get(10, TimeUnit.SECONDS); fail("Should not get here."); @@ -225,6 +226,7 @@ private void tryMergeAndEnsureItFails(final TableName tableName) throws Exceptio // expected to reach here // check and ensure that region do not get merged assertTrue(admin.getRegions(tableName).size() == originalCount); + assertTrue("Expected DoNotRetryIOException!", ee.getCause() instanceof DoNotRetryIOException); } } @@ -255,7 +257,7 @@ private void tryMergeAndEnsureItIsSuccess(final TableName tableName) throws Exce byte[] nameOfRegionB = regions.get(1).getEncodedNameAsBytes(); // merge the table regions and wait until region count decreases - admin.mergeRegionsAsync(nameOfRegionA, nameOfRegionB, true); + admin.mergeRegionsAsync(new byte[][] { nameOfRegionA, nameOfRegionB }, true); TEST_UTIL.waitFor(30000, new ExplainingPredicate() { @Override From 
2785a3e9c5baac78f584e452f1fbc8a53618db30 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Sat, 31 Aug 2024 09:31:36 +0530 Subject: [PATCH 502/514] HBASE-28793 Update hbase-thirdparty to 4.1.8 (#6166) HBASE-28793 Update hbase-thirdparty to 4.1.8 Signed-off-by: Nihal Jain Signed-off-by: Duo Zhang Reviewed-by: Vineet Kumar Maheshwari --- hbase-examples/pom.xml | 2 +- .../apache/hadoop/hbase/IntegrationTestsDriver.java | 2 +- hbase-protocol-shaded/pom.xml | 2 +- .../hadoop/hbase/util/compaction/MajorCompactor.java | 2 +- .../hbase/quotas/TestSpaceQuotasWithSnapshots.java | 4 ++-- pom.xml | 10 +++++----- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 4b4a8f3b55bb..00a66cfcc9c2 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -33,7 +33,7 @@ - 4.26.1 + 4.27.3 diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java index 3b590493a9fd..dcc2e3234da4 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java @@ -77,7 +77,7 @@ protected void addOptions() { @Override protected void processOptions(CommandLine cmd) { - String testFilterString = cmd.getOptionValue(SHORT_REGEX_ARG, null); + String testFilterString = cmd.getOptionValue(SHORT_REGEX_ARG); if (testFilterString != null) { intTestFilter.setPattern(testFilterString); } diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 2c73844b7c02..cdacdeebb0f9 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -34,7 +34,7 @@ - 4.26.1 + 4.27.3 3.10.6.Final - 4.1.108.Final + 4.1.112.Final 0.15.0 0.15.0 @@ -852,8 +852,8 @@ Note that the version of jackson-[annotations,core,databind] must be kept in sync with the version of jackson-jaxrs-json-provider shipped in hbase-thirdparty. --> - 2.17.0 - 2.17.0 + 2.17.2 + 2.17.2 2.3.1 3.1.0 2.1.1 @@ -894,7 +894,7 @@ --> 8.29 3.1.0 - 2.26.1 + 2.28.0 2.4.2 1.0.0 1.8 @@ -926,7 +926,7 @@ databind] must be kept in sync with the version of jackson-jaxrs-json-provider shipped in hbase-thirdparty. --> - 4.1.7 + 4.1.8 0.8.8 From 9d3ceb8964f1c0b907d11dfce29a6ca2abab2709 Mon Sep 17 00:00:00 2001 From: Sreenivasulu Date: Sat, 31 Aug 2024 10:58:03 +0530 Subject: [PATCH 503/514] HBASE-28802 Log the IP when hbase.server.useip.enabled is set to true (#6176) Signed-off-by: Pankaj Kumar Reviewed-by: Vineet Kumar Maheshwari --- .../org/apache/hadoop/hbase/regionserver/HRegionServer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index dedd0669113a..a4105a31bfac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1420,7 +1420,7 @@ protected void handleReportForDutyResponse(final RegionServerStartupResponse c) if (!isHostnameConsist) { String msg = "Master passed us a different hostname to use; was=" + (StringUtils.isBlank(useThisHostnameInstead) - ? rpcServices.getSocketAddress().getHostName() + ? 
expectedHostName : this.useThisHostnameInstead) + ", but now=" + hostnameFromMasterPOV; LOG.error(msg); From dcffc4a7bb607b3d3e87ca75deea188cc21740ed Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 2 Sep 2024 04:15:13 -0400 Subject: [PATCH 504/514] HBASE-28643 An unbounded backup failure message can cause an irrecoverable state for the given backup (#6088) Co-authored-by: Ray Mattingly Signed-off-by: Nick Dimiduk --- .../main/java/org/apache/hadoop/hbase/backup/BackupInfo.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index 1fad5b6cfdb1..5a0094740d63 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -44,6 +44,7 @@ @InterfaceAudience.Private public class BackupInfo implements Comparable { private static final Logger LOG = LoggerFactory.getLogger(BackupInfo.class); + private static final int MAX_FAILED_MESSAGE_LENGTH = 1024; public interface Filter { /** @@ -266,6 +267,9 @@ public String getFailedMsg() { } public void setFailedMsg(String failedMsg) { + if (failedMsg.length() > MAX_FAILED_MESSAGE_LENGTH) { + failedMsg = failedMsg.substring(0, MAX_FAILED_MESSAGE_LENGTH); + } this.failedMsg = failedMsg; } From ed6613e1afa589e1d6ebcad829ae3e6d2a79651e Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Mon, 2 Sep 2024 04:38:29 -0400 Subject: [PATCH 505/514] HBASE-28697 Don't clean bulk load system entries until backup is complete (#6089) Co-authored-by: Ray Mattingly --- .../impl/IncrementalTableBackupClient.java | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index b7d1c4a95cc6..bbb39cb3a03a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -103,13 +103,14 @@ protected static int getIndex(TableName tbl, List sTableList) { /* * Reads bulk load records from backup table, iterates through the records and forms the paths for - * bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination + * bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination. This method does NOT + * clean up the entries in the bulk load system table. Those entries should not be cleaned until + * the backup is marked as complete. 
* @param sTableList list of tables to be backed up - * @return map of table to List of files + * @return the rowkeys of bulk loaded files */ @SuppressWarnings("unchecked") - protected Map>[] handleBulkLoad(List sTableList) - throws IOException { + protected List handleBulkLoad(List sTableList) throws IOException { Map>[] mapForSrc = new Map[sTableList.size()]; List activeFiles = new ArrayList<>(); List archiveFiles = new ArrayList<>(); @@ -191,8 +192,8 @@ protected Map>[] handleBulkLoad(List sTableList) } copyBulkLoadedFiles(activeFiles, archiveFiles); - backupManager.deleteBulkLoadedRows(pair.getSecond()); - return mapForSrc; + + return pair.getSecond(); } private void copyBulkLoadedFiles(List activeFiles, List archiveFiles) @@ -308,10 +309,12 @@ public void execute() throws IOException { BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); - handleBulkLoad(backupInfo.getTableNames()); + List bulkLoadedRows = handleBulkLoad(backupInfo.getTableNames()); + // backup complete completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf); + backupManager.deleteBulkLoadedRows(bulkLoadedRows); } catch (IOException e) { failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", BackupType.INCREMENTAL, conf); From 84655ded27bbd6d8aada9707d06f23c65d2c4746 Mon Sep 17 00:00:00 2001 From: jhungund <106576553+jhungund@users.noreply.github.com> Date: Tue, 3 Sep 2024 21:18:14 +0530 Subject: [PATCH 506/514] HBASE-28805: Chunked persistence of backing map for persistent bucket cache. (#6183) HBASE-28805: Chunked persistence of backing map for persistent bucket cache. Signed-off-by: Wellington Chevreuil --- .../hbase/io/hfile/bucket/BucketCache.java | 190 +++++++++++++----- .../io/hfile/bucket/BucketProtoUtils.java | 44 +++- .../bucket/TestVerifyBucketCacheFile.java | 47 +++++ 3 files changed, 226 insertions(+), 55 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 5816b8ff1602..10d0c925a47a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -122,6 +123,8 @@ public class BucketCache implements BlockCache, HeapSize { static final String EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor"; static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor"; static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor"; + static final String BACKING_MAP_PERSISTENCE_CHUNK_SIZE = + "hbase.bucketcache.persistence.chunksize"; /** Use strong reference for offsetLock or not */ private static final String STRONG_REF_KEY = "hbase.bucketcache.offsetlock.usestrongref"; @@ -145,6 +148,8 @@ public class BucketCache implements BlockCache, HeapSize { final static int DEFAULT_WRITER_THREADS = 3; final static int DEFAULT_WRITER_QUEUE_ITEMS = 64; + final static long DEFAULT_BACKING_MAP_PERSISTENCE_CHUNK_SIZE = 10000000; + // Store/read block data transient final IOEngine ioEngine; @@ -273,6 +278,8 @@ public class BucketCache implements BlockCache, HeapSize { */ private String algorithm; + 
private long persistenceChunkSize; + /* Tracing failed Bucket Cache allocations. */ private long allocFailLogPrevTs; // time of previous log event for allocation failure. private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute. @@ -313,6 +320,11 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck this.queueAdditionWaitTime = conf.getLong(QUEUE_ADDITION_WAIT_TIME, DEFAULT_QUEUE_ADDITION_WAIT_TIME); this.bucketcachePersistInterval = conf.getLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, 1000); + this.persistenceChunkSize = + conf.getLong(BACKING_MAP_PERSISTENCE_CHUNK_SIZE, DEFAULT_BACKING_MAP_PERSISTENCE_CHUNK_SIZE); + if (this.persistenceChunkSize <= 0) { + persistenceChunkSize = DEFAULT_BACKING_MAP_PERSISTENCE_CHUNK_SIZE; + } sanityCheckConfigs(); @@ -1358,8 +1370,8 @@ void persistToFile() throws IOException { } File tempPersistencePath = new File(persistencePath + EnvironmentEdgeManager.currentTime()); try (FileOutputStream fos = new FileOutputStream(tempPersistencePath, false)) { - fos.write(ProtobufMagic.PB_MAGIC); - BucketProtoUtils.toPB(this).writeDelimitedTo(fos); + LOG.debug("Persist in new chunked persistence format."); + persistChunkedBackingMap(fos); } catch (IOException e) { LOG.error("Failed to persist bucket cache to file", e); throw e; @@ -1405,16 +1417,23 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { throw new IOException("Incorrect number of bytes read while checking for protobuf magic " + "number. Requested=" + pblen + ", Received= " + read + ", File=" + persistencePath); } - if (!ProtobufMagic.isPBMagicPrefix(pbuf)) { + if (ProtobufMagic.isPBMagicPrefix(pbuf)) { + LOG.info("Reading old format of persistence."); + // The old non-chunked version of backing map persistence. + parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); + } else if (Arrays.equals(pbuf, BucketProtoUtils.PB_MAGIC_V2)) { + // The new persistence format of chunked persistence. + LOG.info("Reading new chunked format of persistence."); + retrieveChunkedBackingMap(in, bucketSizes); + } else { // In 3.0 we have enough flexibility to dump the old cache data. // TODO: In 2.x line, this might need to be filled in to support reading the old format throw new IOException( "Persistence file does not start with protobuf magic number. " + persistencePath); } - parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); blockNumber.add(backingMap.size()); - LOG.info("Bucket cache retrieved from file successfully"); + LOG.info("Bucket cache retrieved from file successfully with size: {}", backingMap.size()); } } @@ -1457,6 +1476,75 @@ private void verifyCapacityAndClasses(long capacitySize, String ioclass, String } } + private void verifyFileIntegrity(BucketCacheProtos.BucketCacheEntry proto) { + try { + if (proto.hasChecksum()) { + ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), + algorithm); + } + backingMapValidated.set(true); + } catch (IOException e) { + LOG.warn("Checksum for cache file failed. " + + "We need to validate each cache key in the backing map. 
" + + "This may take some time, so we'll do it in a background thread,"); + + Runnable cacheValidator = () -> { + while (bucketAllocator == null) { + try { + Thread.sleep(50); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + } + long startTime = EnvironmentEdgeManager.currentTime(); + int totalKeysOriginally = backingMap.size(); + for (Map.Entry keyEntry : backingMap.entrySet()) { + try { + ((FileIOEngine) ioEngine).checkCacheTime(keyEntry.getValue()); + } catch (IOException e1) { + LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey()); + evictBlock(keyEntry.getKey()); + fileNotFullyCached(keyEntry.getKey().getHfileName()); + } + } + backingMapValidated.set(true); + LOG.info("Finished validating {} keys in the backing map. Recovered: {}. This took {}ms.", + totalKeysOriginally, backingMap.size(), + (EnvironmentEdgeManager.currentTime() - startTime)); + }; + Thread t = new Thread(cacheValidator); + t.setDaemon(true); + t.start(); + } + } + + private void parsePB(BucketCacheProtos.BucketCacheEntry firstChunk, + List chunks) throws IOException { + fullyCachedFiles.clear(); + Pair, NavigableSet> pair = + BucketProtoUtils.fromPB(firstChunk.getDeserializersMap(), firstChunk.getBackingMap(), + this::createRecycler); + backingMap.putAll(pair.getFirst()); + blocksByHFile.addAll(pair.getSecond()); + fullyCachedFiles.putAll(BucketProtoUtils.fromPB(firstChunk.getCachedFilesMap())); + + LOG.debug("Number of blocks after first chunk: {}, blocksByHFile: {}", backingMap.size(), + fullyCachedFiles.size()); + int i = 1; + for (BucketCacheProtos.BackingMap chunk : chunks) { + Pair, NavigableSet> pair2 = + BucketProtoUtils.fromPB(firstChunk.getDeserializersMap(), chunk, this::createRecycler); + backingMap.putAll(pair2.getFirst()); + blocksByHFile.addAll(pair2.getSecond()); + LOG.debug("Number of blocks after {} reading chunk: {}, blocksByHFile: {}", ++i, + backingMap.size(), fullyCachedFiles.size()); + } + verifyFileIntegrity(firstChunk); + verifyCapacityAndClasses(firstChunk.getCacheCapacity(), firstChunk.getIoClass(), + firstChunk.getMapClass()); + updateRegionSizeMapWhileRetrievingFromFile(); + } + private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException { Pair, NavigableSet> pair = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(), @@ -1465,52 +1553,60 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio blocksByHFile = pair.getSecond(); fullyCachedFiles.clear(); fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap())); - if (proto.hasChecksum()) { - try { - ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(), - algorithm); - backingMapValidated.set(true); - } catch (IOException e) { - LOG.warn("Checksum for cache file failed. " - + "We need to validate each cache key in the backing map. " - + "This may take some time, so we'll do it in a background thread,"); - Runnable cacheValidator = () -> { - while (bucketAllocator == null) { - try { - Thread.sleep(50); - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - } - long startTime = EnvironmentEdgeManager.currentTime(); - int totalKeysOriginally = backingMap.size(); - for (Map.Entry keyEntry : backingMap.entrySet()) { - try { - ((FileIOEngine) ioEngine).checkCacheTime(keyEntry.getValue()); - } catch (IOException e1) { - LOG.debug("Check for key {} failed. 
Evicting.", keyEntry.getKey()); - evictBlock(keyEntry.getKey()); - fullyCachedFiles.remove(keyEntry.getKey().getHfileName()); - } - } - backingMapValidated.set(true); - LOG.info("Finished validating {} keys in the backing map. Recovered: {}. This took {}ms.", - totalKeysOriginally, backingMap.size(), - (EnvironmentEdgeManager.currentTime() - startTime)); - }; - Thread t = new Thread(cacheValidator); - t.setDaemon(true); - t.start(); - } - } else { - // if has not checksum, it means the persistence file is old format - LOG.info("Persistent file is old format, it does not support verifying file integrity!"); - backingMapValidated.set(true); - } + verifyFileIntegrity(proto); updateRegionSizeMapWhileRetrievingFromFile(); verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), proto.getMapClass()); } + private void persistChunkedBackingMap(FileOutputStream fos) throws IOException { + long numChunks = backingMap.size() / persistenceChunkSize; + if (backingMap.size() % persistenceChunkSize != 0) { + numChunks += 1; + } + + LOG.debug( + "persistToFile: before persisting backing map size: {}, " + + "fullycachedFiles size: {}, chunkSize: {}, numberofChunks: {}", + backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize, numChunks); + + BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize, numChunks); + + LOG.debug( + "persistToFile: after persisting backing map size: {}, " + + "fullycachedFiles size: {}, numChunksPersisteed: {}", + backingMap.size(), fullyCachedFiles.size(), numChunks); + } + + private void retrieveChunkedBackingMap(FileInputStream in, int[] bucketSizes) throws IOException { + byte[] bytes = new byte[Long.BYTES]; + int readSize = in.read(bytes); + if (readSize != Long.BYTES) { + throw new IOException("Invalid size of chunk-size read from persistence: " + readSize); + } + long batchSize = Bytes.toLong(bytes, 0); + + readSize = in.read(bytes); + if (readSize != Long.BYTES) { + throw new IOException("Invalid size for number of chunks read from persistence: " + readSize); + } + long numChunks = Bytes.toLong(bytes, 0); + + LOG.info("Number of chunks: {}, chunk size: {}", numChunks, batchSize); + + ArrayList bucketCacheMaps = new ArrayList<>(); + // Read the first chunk that has all the details. + BucketCacheProtos.BucketCacheEntry firstChunk = + BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in); + + // Subsequent chunks have the backingMap entries. + for (int i = 1; i < numChunks; i++) { + LOG.info("Reading chunk no: {}", i + 1); + bucketCacheMaps.add(BucketCacheProtos.BackingMap.parseDelimitedFrom(in)); + LOG.info("Retrieved chunk: {}", i + 1); + } + parsePB(firstChunk, bucketCacheMaps); + } + /** * Check whether we tolerate IO error this time. 
If the duration of IOEngine throwing errors * exceeds ioErrorsDurationTimeTolerated, we will disable the cache diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index 4b42414fb9c5..4618200325c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.io.hfile.bucket; +import java.io.FileOutputStream; import java.io.IOException; import java.util.Comparator; import java.util.HashMap; @@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; import org.apache.hadoop.hbase.io.hfile.HFileBlock; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @@ -41,29 +43,55 @@ @InterfaceAudience.Private final class BucketProtoUtils { + + final static byte[] PB_MAGIC_V2 = new byte[] { 'V', '2', 'U', 'F' }; + private BucketProtoUtils() { } - static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { + static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache, + BucketCacheProtos.BackingMap backingMap) { return BucketCacheProtos.BucketCacheEntry.newBuilder().setCacheCapacity(cache.getMaxSize()) .setIoClass(cache.ioEngine.getClass().getName()) .setMapClass(cache.backingMap.getClass().getName()) .putAllDeserializers(CacheableDeserializerIdManager.save()) - .putAllCachedFiles(toCachedPB(cache.fullyCachedFiles)) - .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) + .putAllCachedFiles(toCachedPB(cache.fullyCachedFiles)).setBackingMap(backingMap) .setChecksum(ByteString .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) .build(); } - private static BucketCacheProtos.BackingMap toPB(Map backingMap) { + public static void serializeAsPB(BucketCache cache, FileOutputStream fos, long chunkSize, + long numChunks) throws IOException { + int blockCount = 0; + int chunkCount = 0; + int backingMapSize = cache.backingMap.size(); BucketCacheProtos.BackingMap.Builder builder = BucketCacheProtos.BackingMap.newBuilder(); - for (Map.Entry entry : backingMap.entrySet()) { - builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder().setKey(toPB(entry.getKey())) - .setValue(toPB(entry.getValue())).build()); + + fos.write(PB_MAGIC_V2); + fos.write(Bytes.toBytes(chunkSize)); + fos.write(Bytes.toBytes(numChunks)); + + for (Map.Entry entry : cache.backingMap.entrySet()) { + blockCount++; + builder.addEntry( + BucketCacheProtos.BackingMapEntry.newBuilder().setKey(BucketProtoUtils.toPB(entry.getKey())) + .setValue(BucketProtoUtils.toPB(entry.getValue())).build()); + if (blockCount % chunkSize == 0 || (blockCount == backingMapSize)) { + chunkCount++; + if (chunkCount == 1) { + // Persist all details along with the first chunk into BucketCacheEntry + BucketProtoUtils.toPB(cache, builder.build()).writeDelimitedTo(fos); + } else { + // Directly persist subsequent backing-map chunks. 
+ builder.build().writeDelimitedTo(fos); + } + if (blockCount < backingMapSize) { + builder = BucketCacheProtos.BackingMap.newBuilder(); + } + } } - return builder.build(); } private static BucketCacheProtos.BlockCacheKey toPB(BlockCacheKey key) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java index 0d33fb079bcd..b49a2b1db8d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.apache.hadoop.hbase.io.hfile.CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY; +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BACKING_MAP_PERSISTENCE_CHUNK_SIZE; import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -350,6 +351,52 @@ public void testBucketCacheRecovery() throws Exception { TEST_UTIL.cleanupTestDir(); } + @Test + public void testSingleChunk() throws Exception { + testChunkedBackingMapRecovery(5, 5); + } + + @Test + public void testMultipleChunks() throws Exception { + testChunkedBackingMapRecovery(5, 10); + } + + private void testChunkedBackingMapRecovery(int chunkSize, int numBlocks) throws Exception { + HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + Path testDir = TEST_UTIL.getDataTestDir(); + TEST_UTIL.getTestFileSystem().mkdirs(testDir); + Configuration conf = HBaseConfiguration.create(); + conf.setLong(BACKING_MAP_PERSISTENCE_CHUNK_SIZE, chunkSize); + + String mapFileName = testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime(); + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName, + DEFAULT_ERROR_TOLERATION_DURATION, conf); + + CacheTestUtils.HFileBlockPair[] blocks = + CacheTestUtils.generateHFileBlocks(constructedBlockSize, numBlocks); + + for (int i = 0; i < numBlocks; i++) { + cacheAndWaitUntilFlushedToBucket(bucketCache, blocks[i].getBlockName(), blocks[i].getBlock()); + } + + // saves the current state + bucketCache.persistToFile(); + + // Create a new bucket which reads from persistence file. 
+ BucketCache newBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName, + DEFAULT_ERROR_TOLERATION_DURATION, conf); + + assertEquals(numBlocks, newBucketCache.backingMap.size()); + + for (int i = 0; i < numBlocks; i++) { + assertEquals(blocks[i].getBlock(), + newBucketCache.getBlock(blocks[i].getBlockName(), false, false, false)); + } + TEST_UTIL.cleanupTestDir(); + } + private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey) throws InterruptedException { while (!cache.backingMap.containsKey(cacheKey) || cache.ramCache.containsKey(cacheKey)) { From 63b26b126dd61257e1a6fdf2f6eb1737ad99d71c Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Wed, 4 Sep 2024 03:49:38 -0400 Subject: [PATCH 507/514] HBASE-28792: AsyncTableImpl should call coprocessor callbacks in a defined order (#6168) Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/client/AsyncTableImpl.java | 27 ++- ...gregationClientWithCallbackThreadPool.java | 156 ++++++++++++++++++ 2 files changed, 180 insertions(+), 3 deletions(-) create mode 100644 hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClientWithCallbackThreadPool.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index 590ee9bc47a3..3b411cea7fb7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.apache.hadoop.conf.Configuration; @@ -299,19 +300,39 @@ public CoprocessorServiceBuilder coprocessorService( final Context context = Context.current(); CoprocessorCallback wrappedCallback = new CoprocessorCallback() { + private final Phaser regionCompletesInProgress = new Phaser(1); + @Override public void onRegionComplete(RegionInfo region, R resp) { - pool.execute(context.wrap(() -> callback.onRegionComplete(region, resp))); + regionCompletesInProgress.register(); + pool.execute(context.wrap(() -> { + try { + callback.onRegionComplete(region, resp); + } finally { + regionCompletesInProgress.arriveAndDeregister(); + } + })); } @Override public void onRegionError(RegionInfo region, Throwable error) { - pool.execute(context.wrap(() -> callback.onRegionError(region, error))); + regionCompletesInProgress.register(); + pool.execute(context.wrap(() -> { + try { + callback.onRegionError(region, error); + } finally { + regionCompletesInProgress.arriveAndDeregister(); + } + })); } @Override public void onComplete() { - pool.execute(context.wrap(callback::onComplete)); + pool.execute(context.wrap(() -> { + // Guarantee that onComplete() is called after all onRegionComplete()'s are called + regionCompletesInProgress.arriveAndAwaitAdvance(); + callback.onComplete(); + })); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClientWithCallbackThreadPool.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClientWithCallbackThreadPool.java new file mode 100644 index 000000000000..a5c748829970 --- /dev/null +++ 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClientWithCallbackThreadPool.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.LongStream; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.coprocessor.AsyncAggregationClient; +import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter; +import org.apache.hadoop.hbase.coprocessor.AggregateImplementation; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.testclassification.CoprocessorTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Same as TestAsyncAggregationClient, except that {@link AsyncTableImpl} is involved in addition to + * {@link RawAsyncTableImpl}. Exercises the code paths in {@link AsyncTableImpl#coprocessorService}. 
+ */ +@Category({ MediumTests.class, CoprocessorTests.class }) +public class TestAsyncAggregationClientWithCallbackThreadPool { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAsyncAggregationClientWithCallbackThreadPool.class); + + private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableName TABLE_NAME = TableName.valueOf("TestAsyncAggregationClient"); + + private static byte[] CF = Bytes.toBytes("CF"); + + private static byte[] CQ = Bytes.toBytes("CQ"); + + private static byte[] CQ2 = Bytes.toBytes("CQ2"); + + private static long COUNT = 1000; + + private static AsyncConnection CONN; + + private static AsyncTable TABLE; + + private static ExecutorService EXECUTOR_SERVICE; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = UTIL.getConfiguration(); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + AggregateImplementation.class.getName()); + UTIL.startMiniCluster(3); + byte[][] splitKeys = new byte[8][]; + for (int i = 111; i < 999; i += 111) { + splitKeys[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i)); + } + UTIL.createTable(TABLE_NAME, CF, splitKeys); + CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); + EXECUTOR_SERVICE = Executors.newFixedThreadPool(1); + TABLE = CONN.getTable(TABLE_NAME, EXECUTOR_SERVICE); + TABLE.putAll(LongStream.range(0, COUNT) + .mapToObj(l -> new Put(Bytes.toBytes(String.format("%03d", l))) + .addColumn(CF, CQ, Bytes.toBytes(l)).addColumn(CF, CQ2, Bytes.toBytes(l * l))) + .collect(Collectors.toList())).get(); + } + + @AfterClass + public static void tearDown() throws Exception { + CONN.close(); + UTIL.shutdownMiniCluster(); + EXECUTOR_SERVICE.shutdownNow(); + } + + @Test + public void testMax() throws InterruptedException, ExecutionException { + assertEquals(COUNT - 1, AsyncAggregationClient + .max(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + } + + @Test + public void testMin() throws InterruptedException, ExecutionException { + assertEquals(0, AsyncAggregationClient + .min(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + } + + @Test + public void testRowCount() throws InterruptedException, ExecutionException { + assertEquals(COUNT, + AsyncAggregationClient + .rowCount(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() + .longValue()); + + // Run the count twice in case some state doesn't get cleaned up inside AsyncTableImpl + // on the first time. 
+ assertEquals(COUNT, + AsyncAggregationClient + .rowCount(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get() + .longValue()); + } + + @Test + public void testSum() throws InterruptedException, ExecutionException { + assertEquals(COUNT * (COUNT - 1) / 2, AsyncAggregationClient + .sum(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().longValue()); + } + + private static final double DELTA = 1E-3; + + @Test + public void testAvg() throws InterruptedException, ExecutionException { + assertEquals( + (COUNT - 1) / 2.0, AsyncAggregationClient + .avg(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), + DELTA); + } + + @Test + public void testStd() throws InterruptedException, ExecutionException { + double avgSq = + LongStream.range(0, COUNT).map(l -> l * l).reduce((l1, l2) -> l1 + l2).getAsLong() + / (double) COUNT; + double avg = (COUNT - 1) / 2.0; + double std = Math.sqrt(avgSq - avg * avg); + assertEquals( + std, AsyncAggregationClient + .std(TABLE, new LongColumnInterpreter(), new Scan().addColumn(CF, CQ)).get().doubleValue(), + DELTA); + } + +} From 73651dcd857b2623b465fe4a1444286d18d7ad80 Mon Sep 17 00:00:00 2001 From: wdzaslzy <32625989+wdzaslzy@users.noreply.github.com> Date: Wed, 4 Sep 2024 23:02:56 +0800 Subject: [PATCH 508/514] HBASE-28669 After one RegionServer restarts, another RegionServer leaks a connection to ZooKeeper (#6147) Co-authored-by: rodenli Signed-off-by: Duo Zhang --- .../replication/regionserver/RecoveredReplicationSource.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index e47df36e3aa2..68c41316d17d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -49,6 +49,9 @@ protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, () -> { if (workerThreads.isEmpty()) { this.getSourceMetrics().clear(); + if (this.getReplicationEndpoint() != null) { + this.getReplicationEndpoint().stop(); + } manager.finishRecoveredSource(this); } }); From 01e2d16e470ce1f1ad559925a68adcca300246e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:58:16 +0800 Subject: [PATCH 509/514] HBASE-28813 Bump cryptography in /dev-support/git-jira-release-audit (#6196) Bumps [cryptography](https://github.com/pyca/cryptography) from 42.0.4 to 43.0.1. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/42.0.4...43.0.1) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Duo Zhang --- dev-support/git-jira-release-audit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/git-jira-release-audit/requirements.txt b/dev-support/git-jira-release-audit/requirements.txt index cd3c205a8fff..586a1a66bb3b 100644 --- a/dev-support/git-jira-release-audit/requirements.txt +++ b/dev-support/git-jira-release-audit/requirements.txt @@ -19,7 +19,7 @@ blessed==1.17.0 certifi==2024.7.4 cffi==1.13.2 chardet==3.0.4 -cryptography==42.0.4 +cryptography==43.0.1 defusedxml==0.6.0 enlighten==1.4.0 gitdb2==2.0.6 From 3caaf2d739106b56ab94a0561e730ff35802610d Mon Sep 17 00:00:00 2001 From: Peng Lu Date: Thu, 5 Sep 2024 20:07:36 +0800 Subject: [PATCH 510/514] HBASE-28778 NPE may occur when opening master-status or table.jsp or procedure.jsp while Master is initializing (#6152) Signed-off-by: Duo Zhang --- .../master/http/MasterStatusServlet.java | 5 ++- .../hbase-webapps/master/procedures.jsp | 23 +++++++++-- .../resources/hbase-webapps/master/table.jsp | 39 ++++++++++--------- .../hbase-webapps/regionserver/region.jsp | 12 ++++-- 4 files changed, 54 insertions(+), 25 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java index e46621d7b521..09bb5375a5d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java @@ -77,7 +77,10 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro private ServerName getMetaLocationOrNull(HMaster master) { RegionStateNode rsn = master.getAssignmentManager().getRegionStates() .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); - return rsn.isInState(RegionState.State.OPEN) ? rsn.getRegionLocation() : null; + if (rsn != null) { + return rsn.isInState(RegionState.State.OPEN) ? rsn.getRegionLocation() : null; + } + return null; } private Map getFragmentationInfo(HMaster master, Configuration conf) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp index 0ac2ff3908d3..c25c5c3886ba 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp @@ -47,8 +47,28 @@ <%@ page import="org.apache.hadoop.hbase.metrics.Histogram" %> <%@ page import="java.util.TreeMap" %> <%@ page import="org.apache.hadoop.hbase.metrics.impl.HistogramImpl" %> + + + + + <% HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + if (!master.isInitialized()) { +%> +
+<%-- markup not recoverable in this copy: the added block renders an error banner and a redirect while the Master is still initializing --%>

    +<% return; + } %> + +<% ProcedureExecutor procExecutor = master.getMasterProcedureExecutor(); List> procedures = procExecutor.getProcedures(); Collections.sort(procedures, new Comparator() { @@ -63,9 +83,6 @@ List lockedResources = master.getLocks(); pageContext.setAttribute("pageTitle", "HBase Master Procedures: " + master.getServerName()); %> - - -
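The servlet-side hunk earlier in this patch follows the same defensive shape as the JSP guard above: a RegionStateNode that does not exist yet (because the Master is still initializing) is treated like a meta region that simply is not OPEN. As a consolidated sketch, restated with an early null return but otherwise taken from that hunk (semantically identical to the diffed method, shown here only for readability):

  private ServerName getMetaLocationOrNull(HMaster master) {
    RegionStateNode rsn = master.getAssignmentManager().getRegionStates()
      .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO);
    if (rsn == null) {
      // Master has not finished initializing; report "no known location" instead of throwing an NPE.
      return null;
    }
    return rsn.isInState(RegionState.State.OPEN) ? rsn.getRegionLocation() : null;
  }
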
    diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index f0599b7aa64a..f0b8ad651171 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -148,11 +148,32 @@ return ""; } %> + + + + + <% final String ZEROMB = "0 MB"; HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); String fqtn = request.getParameter("name"); + // handle the case for fqtn is null or master is not initialized with error message + redirect + if (fqtn == null || !master.isInitialized()) { +%> +
+<%-- markup not recoverable in this copy: the added block renders an error banner and a redirect when the table name is missing or the Master is still initializing --%>

    +<% return; + } %> + +<% final String escaped_fqtn = StringEscapeUtils.escapeHtml4(fqtn); Table table = master.getConnection().getTable(TableName.valueOf(fqtn)); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); @@ -201,24 +222,6 @@ final MetaBrowser metaBrowser = new MetaBrowser(connection, request); %> - - - - -<% // handle the case for fqtn is null or master is not initialized with error message + redirect - if (fqtn == null || ! master.isInitialized()) { %> -
-<%-- markup not recoverable in this copy: the old error banner and redirect block, removed here now that the same check runs before the Table and Admin lookups above --%>

    -<% return; - } %> - <% // unknow table if (! admin.tableExists(TableName.valueOf(fqtn)).get()) { %>
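Taken together, the table.jsp hunks above amount to moving the request validation ahead of every call that assumes an initialized Master. A minimal control-flow sketch of the resulting page structure follows; the banner markup is omitted and the comments are illustrative, only the guard ordering and the calls named in the diff are from the patch:

<%
  HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
  String fqtn = request.getParameter("name");
  // Fail fast, before master.getConnection() or any Admin call can run against an
  // uninitialized Master or a null table name.
  if (fqtn == null || !master.isInitialized()) {
%>
<%-- error banner and redirect (content as in the hunk above) --%>
<%
    return;
  }
  // Only reached when the Master is initialized and a table name was supplied.
  Table table = master.getConnection().getTable(TableName.valueOf(fqtn));
%>
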
    diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp index 9cb432b326a2..c6084f74e904 100644 --- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp @@ -39,8 +39,10 @@ String regionName = request.getParameter("name"); HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); FileSystem fs = rs.getFileSystem(); - - HRegion region = rs.getRegion(regionName); + HRegion region = null; + if (regionName != null) { + region = rs.getRegion(regionName); + } String displayName; boolean isReplicaRegion = false; if (region != null) { @@ -48,7 +50,11 @@ rs.getConfiguration()); isReplicaRegion = region.getRegionInfo().getReplicaId() > RegionInfo.DEFAULT_REPLICA_ID; } else { - displayName = "region {" + regionName + "} is not currently online on this region server"; + if (regionName != null) { + displayName = "region {" + regionName + "} is not currently online on this region server"; + } else { + displayName = "you must specify a region name when accessing this page"; + } } pageContext.setAttribute("pageTitle", "HBase RegionServer: " + rs.getServerName()); %> From b19ee00bbc014c690e583fa1abeb79505bccc346 Mon Sep 17 00:00:00 2001 From: eab148 <54775485+eab148@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:41:01 -0400 Subject: [PATCH 511/514] HBASE-28001: Add request attribute support to BufferedMutator (#6076) Co-authored-by: Evie Boland Signed-off-by: Nick Dimiduk --- .../hbase/client/AsyncBufferedMutator.java | 8 + .../client/AsyncBufferedMutatorBuilder.java | 11 + .../AsyncBufferedMutatorBuilderImpl.java | 15 + .../client/AsyncBufferedMutatorImpl.java | 8 +- .../hadoop/hbase/client/AsyncTable.java | 4 +- .../hbase/client/AsyncTableBuilderBase.java | 6 +- .../hadoop/hbase/client/BufferedMutator.java | 9 + ...fferedMutatorOverAsyncBufferedMutator.java | 6 + .../hbase/client/BufferedMutatorParams.java | 16 + .../client/ConnectionOverAsyncConnection.java | 4 + .../hbase/client/RawAsyncTableImpl.java | 4 +- .../org/apache/hadoop/hbase/client/Table.java | 2 +- .../example/BufferedMutatorExample.java | 16 +- .../client/TestConnectionAttributes.java | 123 ++++++ .../TestRequestAndConnectionAttributes.java | 323 ---------------- .../hbase/client/TestRequestAttributes.java | 360 ++++++++++++++++++ 16 files changed, 581 insertions(+), 334 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java index e5f28d2e0602..6cc2b5adf9d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java @@ -20,6 +20,7 @@ import java.io.Closeable; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; @@ -93,4 +94,11 @@ default CompletableFuture 
mutate(Mutation mutation) { default long getPeriodicalFlushTimeout(TimeUnit unit) { throw new UnsupportedOperationException("Not implemented"); } + + /** + * Returns the rpc request attributes. + */ + default Map getRequestAttributes() { + return Collections.emptyMap(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java index ed21fb8e23ef..d38aa625fb2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; +import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.yetus.audience.InterfaceAudience; @@ -38,6 +39,16 @@ public interface AsyncBufferedMutatorBuilder { */ AsyncBufferedMutatorBuilder setRpcTimeout(long timeout, TimeUnit unit); + /** + * Set a rpc request attribute. + */ + AsyncBufferedMutatorBuilder setRequestAttribute(String key, byte[] value); + + /** + * Set multiple rpc request attributes. + */ + AsyncBufferedMutatorBuilder setRequestAttributes(Map requestAttributes); + /** * Set the base pause time for retrying. We use an exponential policy to generate sleep time when * retrying. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java index ede5b359e833..6905ff3065cb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.yetus.audience.InterfaceAudience; @@ -78,6 +79,20 @@ public AsyncBufferedMutatorBuilder setStartLogErrorsCnt(int startLogErrorsCnt) { return this; } + @Override + public AsyncBufferedMutatorBuilder setRequestAttribute(String key, byte[] value) { + tableBuilder.setRequestAttribute(key, value); + return this; + } + + @Override + public AsyncBufferedMutatorBuilder setRequestAttributes(Map requestAttributes) { + for (Map.Entry requestAttribute : requestAttributes.entrySet()) { + tableBuilder.setRequestAttribute(requestAttribute.getKey(), requestAttribute.getValue()); + } + return this; + } + @Override public AsyncBufferedMutatorBuilder setWriteBufferSize(long writeBufferSize) { Preconditions.checkArgument(writeBufferSize > 0, "writeBufferSize %d must be > 0", diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java index ce4193d91382..3acd8bebdada 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -130,7 +131,7 @@ Stream.> generate(CompletableFuture::new).limit(mutation periodicFlushTask = 
periodicalFlushTimer.newTimeout(timeout -> { synchronized (AsyncBufferedMutatorImpl.this) { // confirm that we are still valid, if there is already an internalFlush call before us, - // then we should not execute any more. And in internalFlush we will set periodicFlush + // then we should not execute anymore. And in internalFlush we will set periodicFlush // to null, and since we may schedule a new one, so here we check whether the references // are equal. if (timeout == periodicFlushTask) { @@ -170,4 +171,9 @@ public long getWriteBufferSize() { public long getPeriodicalFlushTimeout(TimeUnit unit) { return unit.convert(periodicFlushTimeoutNs, TimeUnit.NANOSECONDS); } + + @Override + public Map getRequestAttributes() { + return table.getRequestAttributes(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index 2979c6689884..f14eac3cf79a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -21,12 +21,12 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.toCheckExistenceOnly; import static org.apache.hadoop.hbase.util.FutureUtils.allOf; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; -import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.TableName; @@ -117,7 +117,7 @@ public interface AsyncTable { * @return a map of request attributes supplied by the client */ default Map getRequestAttributes() { - throw new NotImplementedException("Add an implementation!"); + return Collections.emptyMap(); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index 02e9da0770b4..428e7358195e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -129,10 +129,10 @@ public AsyncTableBuilderBase setStartLogErrorsCnt(int startLogErrorsCnt) { @Override public AsyncTableBuilder setRequestAttribute(String key, byte[] value) { - if (this.requestAttributes.isEmpty()) { - this.requestAttributes = new HashMap<>(); + if (requestAttributes.isEmpty()) { + requestAttributes = new HashMap<>(); } - this.requestAttributes.put(key, value); + requestAttributes.put(key, value); return this; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index f790f5a4e2f5..24563367bbbc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -19,7 +19,9 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -204,6 +206,13 @@ default void setOperationTimeout(int timeout) { "The 
BufferedMutator::setOperationTimeout has not been implemented"); } + /** + * Returns the rpc request attributes. + */ + default Map getRequestAttributes() { + return Collections.emptyMap(); + } + /** * Listens for asynchronous exceptions on a {@link BufferedMutator}. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java index 72692eac59e5..aec4a0cbf216 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; @@ -186,4 +187,9 @@ public void setRpcTimeout(int timeout) { public void setOperationTimeout(int timeout) { // no effect } + + @Override + public Map getRequestAttributes() { + return mutator.getRequestAttributes(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index b3efa14fa7ee..44bc5e2be7cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -38,6 +41,7 @@ public class BufferedMutatorParams implements Cloneable { private String implementationClassName = null; private int rpcTimeout = UNSET; private int operationTimeout = UNSET; + protected Map requestAttributes = Collections.emptyMap(); private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { @Override public void onException(RetriesExhaustedWithDetailsException exception, @@ -85,6 +89,18 @@ public int getOperationTimeout() { return operationTimeout; } + public BufferedMutatorParams setRequestAttribute(String key, byte[] value) { + if (requestAttributes.isEmpty()) { + requestAttributes = new HashMap<>(); + } + requestAttributes.put(key, value); + return this; + } + + public Map getRequestAttributes() { + return requestAttributes; + } + /** * Override the write buffer size specified by the provided {@link Connection}'s * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index 30c348e6d1f1..d299e453266e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -107,6 +107,10 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws I if (params.getMaxKeyValueSize() != BufferedMutatorParams.UNSET) { builder.setMaxKeyValueSize(params.getMaxKeyValueSize()); } + if 
(!params.getRequestAttributes().isEmpty()) { + + builder.setRequestAttributes(params.getRequestAttributes()); + } return new BufferedMutatorOverAsyncBufferedMutator(builder.build(), params.getListener()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index 342cf89acf1a..257a5788b183 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -214,8 +214,8 @@ private SingleRequestCallerBuilder newCaller(byte[] row, int priority, lo .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) .pause(pauseNs, TimeUnit.NANOSECONDS) .pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).setRequestAttributes(requestAttributes) - .startLogErrorsCnt(startLogErrorsCnt).setRequestAttributes(requestAttributes); + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .setRequestAttributes(requestAttributes); } private SingleRequestCallerBuilder diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index 3941c0d18540..907e3d1a7040 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -757,6 +757,6 @@ default long getOperationTimeout(TimeUnit unit) { * @return map of request attributes */ default Map getRequestAttributes() { - throw new NotImplementedException("Add an implementation!"); + return Collections.emptyMap(); } } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/BufferedMutatorExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/BufferedMutatorExample.java index 1c4447eb6598..1835d51d0386 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/BufferedMutatorExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/BufferedMutatorExample.java @@ -19,7 +19,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -27,7 +29,9 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; @@ -67,12 +71,19 @@ public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator } } }; - BufferedMutatorParams params = new BufferedMutatorParams(TABLE).listener(listener); + + BufferedMutatorParams params = new BufferedMutatorParams(TABLE).listener(listener) + .setRequestAttribute("requestInfo", Bytes.toBytes("bar")); // // step 1: create a single Connection and a BufferedMutator, shared by all worker threads. 
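As a rough, self-contained illustration of the client-side wiring this patch enables (a sketch, not part of the patch itself): the snippet below combines the connection-attribute overload of ConnectionFactory.createConnection with the new BufferedMutatorParams.setRequestAttribute, mirroring what BufferedMutatorExample now does. The table name, column family, row key, and attribute values are invented for illustration, and error handling is omitted.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestAttributeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Connection attributes are sent once, in the connection header.
    Map<String, byte[]> connectionAttributes = new HashMap<>();
    connectionAttributes.put("clientId", Bytes.toBytes("foo"));
    // Request attributes are attached to every RPC the mutator issues.
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("demo"))
      .setRequestAttribute("requestInfo", Bytes.toBytes("bar"));
    try (Connection conn = ConnectionFactory.createConnection(conf, null,
      AuthUtil.loginClient(conf), connectionAttributes);
      BufferedMutator mutator = conn.getBufferedMutator(params)) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put);
      mutator.flush();
    }
  }
}

The asynchronous path is analogous: asyncConnection.getBufferedMutatorBuilder(tableName).setRequestAttribute("requestInfo", Bytes.toBytes("bar")).build() returns an AsyncBufferedMutator whose getRequestAttributes() exposes the same map.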
// - try (final Connection conn = ConnectionFactory.createConnection(getConf()); + Map connectionAttributes = new HashMap<>(); + connectionAttributes.put("clientId", Bytes.toBytes("foo")); + Configuration conf = getConf(); + try ( + final Connection conn = ConnectionFactory.createConnection(conf, null, + AuthUtil.loginClient(conf), connectionAttributes); final BufferedMutator mutator = conn.getBufferedMutator(params)) { /** worker pool that operates on BufferedTable instances */ @@ -104,6 +115,7 @@ public Void call() throws Exception { f.get(5, TimeUnit.MINUTES); } workerPool.shutdown(); + mutator.flush(); } catch (IOException e) { // exception while creating/destroying Connection or BufferedMutator LOG.info("exception while creating/destroying Connection or BufferedMutator", e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java new file mode 100644 index 000000000000..49c7e86975d1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, MediumTests.class }) +public class TestConnectionAttributes { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestConnectionAttributes.class); + + private static final Map CONNECTION_ATTRIBUTES = new HashMap<>(); + static { + CONNECTION_ATTRIBUTES.put("clientId", Bytes.toBytes("foo")); + } + private static final byte[] FAMILY = Bytes.toBytes("0"); + private static final TableName TABLE_NAME = TableName.valueOf("testConnectionAttributes"); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static SingleProcessHBaseCluster cluster; + + @BeforeClass + public static void setUp() throws Exception { + cluster = TEST_UTIL.startMiniCluster(1); + Table table = TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY }, 1, + HConstants.DEFAULT_BLOCKSIZE, TestConnectionAttributes.AttributesCoprocessor.class.getName()); + table.close(); + } + + @AfterClass + public static void afterClass() throws Exception { + cluster.close(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testConnectionHeaderOverwrittenAttributesRemain() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try (Connection conn = ConnectionFactory.createConnection(conf, null, + AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES); Table table = conn.getTable(TABLE_NAME)) { + + // submit a 300 byte rowkey here to encourage netty's allocator to overwrite the connection + // header + byte[] bytes = new byte[300]; + new Random().nextBytes(bytes); + Result result = table.get(new Get(bytes)); + + assertEquals(CONNECTION_ATTRIBUTES.size(), result.size()); + for (Map.Entry attr : CONNECTION_ATTRIBUTES.entrySet()) { + byte[] val = result.getValue(FAMILY, Bytes.toBytes(attr.getKey())); + assertEquals(Bytes.toStringBinary(attr.getValue()), Bytes.toStringBinary(val)); + } + } + } + + public static class AttributesCoprocessor implements RegionObserver, RegionCoprocessor { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preGetOp(ObserverContext c, Get get, + List result) throws IOException { 
+ RpcCall rpcCall = RpcServer.getCurrentCall().get(); + for (Map.Entry attr : rpcCall.getConnectionAttributes().entrySet()) { + result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) + .setFamily(FAMILY).setQualifier(Bytes.toBytes(attr.getKey())).setValue(attr.getValue()) + .setType(Cell.Type.Put).setTimestamp(1).build()); + } + result.sort(CellComparator.getInstance()); + c.bypass(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java deleted file mode 100644 index 728b877a32b4..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAndConnectionAttributes.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.AuthUtil; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.ipc.RpcCall; -import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; - -@Category({ ClientTests.class, MediumTests.class }) -public class TestRequestAndConnectionAttributes { - - @ClassRule - 
public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestAndConnectionAttributes.class); - - private static final Map CONNECTION_ATTRIBUTES = new HashMap<>(); - static { - CONNECTION_ATTRIBUTES.put("clientId", Bytes.toBytes("foo")); - } - private static final Map REQUEST_ATTRIBUTES = new HashMap<>(); - private static final ExecutorService EXECUTOR_SERVICE = Executors.newFixedThreadPool(100); - private static final AtomicBoolean REQUEST_ATTRIBUTES_VALIDATED = new AtomicBoolean(false); - private static final byte[] REQUEST_ATTRIBUTES_TEST_TABLE_CF = Bytes.toBytes("0"); - private static final TableName REQUEST_ATTRIBUTES_TEST_TABLE = - TableName.valueOf("testRequestAttributes"); - - private static HBaseTestingUtil TEST_UTIL = null; - - @BeforeClass - public static void setUp() throws Exception { - TEST_UTIL = new HBaseTestingUtil(); - TEST_UTIL.startMiniCluster(1); - TEST_UTIL.createTable(REQUEST_ATTRIBUTES_TEST_TABLE, - new byte[][] { REQUEST_ATTRIBUTES_TEST_TABLE_CF }, 1, HConstants.DEFAULT_BLOCKSIZE, - AttributesCoprocessor.class.getName()); - } - - @AfterClass - public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Before - public void setup() { - REQUEST_ATTRIBUTES_VALIDATED.getAndSet(false); - } - - @Test - public void testConnectionHeaderOverwrittenAttributesRemain() throws IOException { - TableName tableName = TableName.valueOf("testConnectionAttributes"); - byte[] cf = Bytes.toBytes("0"); - TEST_UTIL.createTable(tableName, new byte[][] { cf }, 1, HConstants.DEFAULT_BLOCKSIZE, - AttributesCoprocessor.class.getName()); - - Configuration conf = TEST_UTIL.getConfiguration(); - try (Connection conn = ConnectionFactory.createConnection(conf, null, - AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES); Table table = conn.getTable(tableName)) { - - // submit a 300 byte rowkey here to encourage netty's allocator to overwrite the connection - // header - byte[] bytes = new byte[300]; - new Random().nextBytes(bytes); - Result result = table.get(new Get(bytes)); - - assertEquals(CONNECTION_ATTRIBUTES.size(), result.size()); - for (Map.Entry attr : CONNECTION_ATTRIBUTES.entrySet()) { - byte[] val = result.getValue(Bytes.toBytes("c"), Bytes.toBytes(attr.getKey())); - assertEquals(Bytes.toStringBinary(attr.getValue()), Bytes.toStringBinary(val)); - } - } - } - - @Test - public void testRequestAttributesGet() throws IOException { - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - - table.get(new Get(Bytes.toBytes(0))); - } - - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void testRequestAttributesMultiGet() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - List gets = ImmutableList.of(new Get(Bytes.toBytes(0)), new Get(Bytes.toBytes(1))); - table.get(gets); - } - - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void 
testRequestAttributesExists() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - - table.exists(new Get(Bytes.toBytes(0))); - } - - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void testRequestAttributesScan() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - ResultScanner scanner = table.getScanner(new Scan()); - scanner.next(); - } - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void testRequestAttributesPut() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - Put put = new Put(Bytes.toBytes("a")); - put.addColumn(REQUEST_ATTRIBUTES_TEST_TABLE_CF, Bytes.toBytes("c"), Bytes.toBytes("v")); - table.put(put); - } - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void testRequestAttributesMultiPut() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - addRandomRequestAttributes(); - - Configuration conf = TEST_UTIL.getConfiguration(); - try ( - Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), - CONNECTION_ATTRIBUTES); - Table table = configureRequestAttributes( - conn.getTableBuilder(REQUEST_ATTRIBUTES_TEST_TABLE, EXECUTOR_SERVICE)).build()) { - Put put = new Put(Bytes.toBytes("a")); - put.addColumn(REQUEST_ATTRIBUTES_TEST_TABLE_CF, Bytes.toBytes("c"), Bytes.toBytes("v")); - table.put(put); - } - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - - @Test - public void testNoRequestAttributes() throws IOException { - assertFalse(REQUEST_ATTRIBUTES_VALIDATED.get()); - TableName tableName = TableName.valueOf("testNoRequestAttributesScan"); - TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("0") }, 1, - HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName()); - - REQUEST_ATTRIBUTES.clear(); - Configuration conf = TEST_UTIL.getConfiguration(); - try (Connection conn = ConnectionFactory.createConnection(conf, null, - AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES)) { - TableBuilder tableBuilder = conn.getTableBuilder(tableName, null); - try (Table table = tableBuilder.build()) { - table.get(new Get(Bytes.toBytes(0))); - assertTrue(REQUEST_ATTRIBUTES_VALIDATED.get()); - } - } - } - - private void addRandomRequestAttributes() { - REQUEST_ATTRIBUTES.clear(); - int j = Math.max(2, (int) (10 * Math.random())); - for (int i = 0; i < j; i++) { - REQUEST_ATTRIBUTES.put(String.valueOf(i), Bytes.toBytes(UUID.randomUUID().toString())); - } - } - - private 
static TableBuilder configureRequestAttributes(TableBuilder tableBuilder) { - REQUEST_ATTRIBUTES.forEach(tableBuilder::setRequestAttribute); - return tableBuilder; - } - - public static class AttributesCoprocessor implements RegionObserver, RegionCoprocessor { - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void preGetOp(ObserverContext c, Get get, - List result) throws IOException { - validateRequestAttributes(); - - // for connection attrs test - RpcCall rpcCall = RpcServer.getCurrentCall().get(); - for (Map.Entry attr : rpcCall.getRequestAttributes().entrySet()) { - result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) - .setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getKey())) - .setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build()); - } - for (Map.Entry attr : rpcCall.getConnectionAttributes().entrySet()) { - result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow()) - .setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getKey())) - .setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build()); - } - result.sort(CellComparator.getInstance()); - c.bypass(); - } - - @Override - public boolean preScannerNext(ObserverContext c, - InternalScanner s, List result, int limit, boolean hasNext) throws IOException { - validateRequestAttributes(); - return hasNext; - } - - @Override - public void prePut(ObserverContext c, Put put, WALEdit edit) - throws IOException { - validateRequestAttributes(); - } - - private void validateRequestAttributes() { - RpcCall rpcCall = RpcServer.getCurrentCall().get(); - Map attrs = rpcCall.getRequestAttributes(); - if (attrs.size() != REQUEST_ATTRIBUTES.size()) { - return; - } - for (Map.Entry attr : attrs.entrySet()) { - if (!REQUEST_ATTRIBUTES.containsKey(attr.getKey())) { - return; - } - if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getKey()), attr.getValue())) { - return; - } - } - REQUEST_ATTRIBUTES_VALIDATED.getAndSet(true); - } - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java new file mode 100644 index 000000000000..66486ebcdd20 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java @@ -0,0 +1,360 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCellScannable; +import org.apache.hadoop.hbase.ExtendedCellScanner; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController; +import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, MediumTests.class }) +public class TestRequestAttributes { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRequestAttributes.class); + + private static final byte[] ROW_KEY1 = Bytes.toBytes("1"); + private static final byte[] ROW_KEY2A = Bytes.toBytes("2A"); + private static final byte[] ROW_KEY2B = Bytes.toBytes("2B"); + private static final byte[] ROW_KEY3 = Bytes.toBytes("3"); + private static final byte[] ROW_KEY4 = Bytes.toBytes("4"); + private static final byte[] ROW_KEY5 = Bytes.toBytes("5"); + private static final byte[] ROW_KEY6 = Bytes.toBytes("6"); + private static final byte[] ROW_KEY7 = Bytes.toBytes("7"); + private static final byte[] ROW_KEY8 = Bytes.toBytes("8"); + private static final Map CONNECTION_ATTRIBUTES = new HashMap<>(); + private static final Map REQUEST_ATTRIBUTES_SCAN = addRandomRequestAttributes(); + private static final Map> ROW_KEY_TO_REQUEST_ATTRIBUTES = + new HashMap<>(); + static { + CONNECTION_ATTRIBUTES.put("clientId", Bytes.toBytes("foo")); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY1, addRandomRequestAttributes()); + Map requestAttributes2 = addRandomRequestAttributes(); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY2A, requestAttributes2); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY2B, requestAttributes2); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY3, addRandomRequestAttributes()); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY4, addRandomRequestAttributes()); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY5, addRandomRequestAttributes()); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY6, addRandomRequestAttributes()); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY7, addRandomRequestAttributes()); + ROW_KEY_TO_REQUEST_ATTRIBUTES.put(ROW_KEY8, 
new HashMap<>()); + } + private static final ExecutorService EXECUTOR_SERVICE = Executors.newFixedThreadPool(100); + private static final byte[] FAMILY = Bytes.toBytes("0"); + private static final TableName TABLE_NAME = TableName.valueOf("testRequestAttributes"); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static SingleProcessHBaseCluster cluster; + + @BeforeClass + public static void setUp() throws Exception { + cluster = TEST_UTIL.startMiniCluster(1); + Table table = TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY }, 1, + HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName()); + table.close(); + } + + @AfterClass + public static void afterClass() throws Exception { + cluster.close(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testRequestAttributesGet() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY1)).build()) { + + table.get(new Get(ROW_KEY1)); + } + } + + @Test + public void testRequestAttributesMultiGet() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY2A)).build()) { + List gets = List.of(new Get(ROW_KEY2A), new Get(ROW_KEY2B)); + table.get(gets); + } + } + + @Test + public void testRequestAttributesScan() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + REQUEST_ATTRIBUTES_SCAN).build()) { + ResultScanner scanner = table.getScanner(new Scan()); + scanner.next(); + } + } + + @Test + public void testRequestAttributesPut() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY3)).build()) { + Put put = new Put(ROW_KEY3); + put.addColumn(FAMILY, Bytes.toBytes("c"), Bytes.toBytes("v")); + table.put(put); + } + } + + @Test + public void testRequestAttributesMultiPut() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY4)).build()) { + Put put1 = new Put(ROW_KEY4); + put1.addColumn(FAMILY, Bytes.toBytes("c1"), Bytes.toBytes("v1")); + Put put2 = new Put(ROW_KEY4); + put2.addColumn(FAMILY, Bytes.toBytes("c2"), Bytes.toBytes("v2")); + table.put(List.of(put1, put2)); + } + } + + @Test + public void testRequestAttributesBufferedMutate() throws IOException, InterruptedException { + Configuration conf = 
TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + BufferedMutator bufferedMutator = + conn.getBufferedMutator(configureRequestAttributes(new BufferedMutatorParams(TABLE_NAME), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY5)));) { + Put put = new Put(ROW_KEY5); + put.addColumn(FAMILY, Bytes.toBytes("c"), Bytes.toBytes("v")); + bufferedMutator.mutate(put); + bufferedMutator.flush(); + } + } + + @Test + public void testRequestAttributesExists() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + Table table = configureRequestAttributes(conn.getTableBuilder(TABLE_NAME, EXECUTOR_SERVICE), + ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY6)).build()) { + + table.exists(new Get(ROW_KEY6)); + } + } + + @Test + public void testRequestAttributesFromRpcController() throws IOException, InterruptedException { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, + RequestMetadataControllerFactory.class, RpcControllerFactory.class); + try ( + Connection conn = ConnectionFactory.createConnection(conf, null, AuthUtil.loginClient(conf), + CONNECTION_ATTRIBUTES); + BufferedMutator bufferedMutator = conn.getBufferedMutator(TABLE_NAME);) { + Put put = new Put(ROW_KEY7); + put.addColumn(FAMILY, Bytes.toBytes("c"), Bytes.toBytes("v")); + bufferedMutator.mutate(put); + bufferedMutator.flush(); + } + conf.unset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY); + } + + @Test + public void testNoRequestAttributes() throws IOException { + Configuration conf = TEST_UTIL.getConfiguration(); + try (Connection conn = ConnectionFactory.createConnection(conf, null, + AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES)) { + TableBuilder tableBuilder = conn.getTableBuilder(TABLE_NAME, null); + try (Table table = tableBuilder.build()) { + table.get(new Get(ROW_KEY8)); + } + } + } + + private static Map addRandomRequestAttributes() { + Map requestAttributes = new HashMap<>(); + int j = Math.max(2, (int) (10 * Math.random())); + for (int i = 0; i < j; i++) { + requestAttributes.put(String.valueOf(i), Bytes.toBytes(UUID.randomUUID().toString())); + } + return requestAttributes; + } + + private static TableBuilder configureRequestAttributes(TableBuilder tableBuilder, + Map requestAttributes) { + requestAttributes.forEach(tableBuilder::setRequestAttribute); + return tableBuilder; + } + + private static BufferedMutatorParams configureRequestAttributes(BufferedMutatorParams params, + Map requestAttributes) { + requestAttributes.forEach(params::setRequestAttribute); + return params; + } + + public static class RequestMetadataControllerFactory extends RpcControllerFactory { + + public RequestMetadataControllerFactory(Configuration conf) { + super(conf); + } + + @Override + public HBaseRpcController newController() { + return new RequestMetadataController(super.newController()); + } + + @Override + public HBaseRpcController newController(ExtendedCellScanner cellScanner) { + return new RequestMetadataController(super.newController(null, cellScanner)); + } + + @Override + public HBaseRpcController newController(RegionInfo regionInfo, + ExtendedCellScanner cellScanner) { + return new RequestMetadataController(super.newController(regionInfo, cellScanner)); + } + + @Override + public HBaseRpcController newController(final List 
cellIterables) { + return new RequestMetadataController(super.newController(null, cellIterables)); + } + + @Override + public HBaseRpcController newController(RegionInfo regionInfo, + final List cellIterables) { + return new RequestMetadataController(super.newController(regionInfo, cellIterables)); + } + + public static class RequestMetadataController extends DelegatingHBaseRpcController { + private final Map requestAttributes; + + RequestMetadataController(HBaseRpcController delegate) { + super(delegate); + this.requestAttributes = ROW_KEY_TO_REQUEST_ATTRIBUTES.get(ROW_KEY7); + } + + @Override + public Map getRequestAttributes() { + return requestAttributes; + } + } + } + + public static class AttributesCoprocessor implements RegionObserver, RegionCoprocessor { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preGetOp(ObserverContext c, Get get, + List result) throws IOException { + validateRequestAttributes(getRequestAttributesForRowKey(get.getRow())); + } + + @Override + public boolean preScannerNext(ObserverContext c, + InternalScanner s, List result, int limit, boolean hasNext) throws IOException { + validateRequestAttributes(REQUEST_ATTRIBUTES_SCAN); + return hasNext; + } + + @Override + public void prePut(ObserverContext c, Put put, WALEdit edit) + throws IOException { + validateRequestAttributes(getRequestAttributesForRowKey(put.getRow())); + } + + private Map getRequestAttributesForRowKey(byte[] rowKey) { + for (byte[] byteArray : ROW_KEY_TO_REQUEST_ATTRIBUTES.keySet()) { + if (Arrays.equals(byteArray, rowKey)) { + return ROW_KEY_TO_REQUEST_ATTRIBUTES.get(byteArray); + } + } + return null; + } + + private void validateRequestAttributes(Map requestAttributes) { + RpcCall rpcCall = RpcServer.getCurrentCall().get(); + Map attrs = rpcCall.getRequestAttributes(); + if (attrs.size() != requestAttributes.size()) { + return; + } + for (Map.Entry attr : attrs.entrySet()) { + if (!requestAttributes.containsKey(attr.getKey())) { + return; + } + if (!Arrays.equals(requestAttributes.get(attr.getKey()), attr.getValue())) { + return; + } + } + } + } +} From 241bbaf0718d6215775cd4996e9ca08ba12eb086 Mon Sep 17 00:00:00 2001 From: WangXin <1458451310@qq.com> Date: Fri, 6 Sep 2024 16:22:27 +0800 Subject: [PATCH 512/514] HBASE-28775 Change the output of DatanodeInfo in the log to the hostname of the datanode (#6148) Co-authored-by: wangxin Signed-off-by: Duo Zhang Signed-off-by: Nihal Jain Reviewed-by: Vineet Kumar Maheshwari Reviewed-by: guluo --- .../FanOutOneBlockAsyncDFSOutputHelper.java | 19 +++++++++++++++++-- .../hbase/regionserver/wal/AbstractFSWAL.java | 5 ++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 7a4f624e6e06..879dfda77ec0 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -35,6 +35,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.Collection; import java.util.EnumSet; import java.util.HashSet; import java.util.IdentityHashMap; @@ -42,6 +43,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; 
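For orientation (a sketch, not part of this patch): the helper added below renders each DatanodeInfo as "(hostName/infoAddr:infoPort)" and joins the entries inside square brackets, so the WAL-roll and output-stream-retry debug logs show hostnames rather than the default DatanodeInfo rendering. A small hypothetical wrapper, assuming only the public getDataNodeInfo method introduced by this change:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public final class PipelineLogging {
  private PipelineLogging() {
  }

  /** Render a write pipeline the same way the new debug logs do. */
  public static String describe(DatanodeInfo[] pipeline) {
    List<DatanodeInfo> nodes = Arrays.stream(pipeline).collect(Collectors.toList());
    // Yields something like "[(dn1.example.com/<infoAddr>:<infoPort>),(dn2.example.com/...)]";
    // the exact address text comes from DatanodeInfo's own accessors.
    return FanOutOneBlockAsyncDFSOutputHelper.getDataNodeInfo(nodes);
  }
}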
+import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.crypto.Encryptor; @@ -473,8 +475,10 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d Set toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); for (int retry = 0;; retry++) { - LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, - toExcludeNodes, retry); + if (LOG.isDebugEnabled()) { + LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, + getDataNodeInfo(toExcludeNodes), retry); + } HdfsFileStatus stat; try { stat = FILE_CREATOR.create(namenode, src, @@ -620,4 +624,15 @@ static void sleepIgnoreInterrupt(int retry) { } catch (InterruptedException e) { } } + + public static String getDataNodeInfo(Collection datanodeInfos) { + if (datanodeInfos.isEmpty()) { + return "[]"; + } + return datanodeInfos.stream() + .map(datanodeInfo -> new StringBuilder().append("(").append(datanodeInfo.getHostName()) + .append("/").append(datanodeInfo.getInfoAddr()).append(":") + .append(datanodeInfo.getInfoPort()).append(")").toString()) + .collect(Collectors.joining(",", "[", "]")); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index bba9bd534e9a..77c296b096cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -71,6 +71,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; +import java.util.stream.Collectors; import org.apache.commons.lang3.mutable.MutableLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -85,6 +86,7 @@ import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; +import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerCall; @@ -1105,7 +1107,8 @@ private Map> rollWriterInternal(boolean force) throws IOExc tellListenersAboutPostLogRoll(oldPath, newPath); if (LOG.isDebugEnabled()) { LOG.debug("Create new " + implClassName + " writer with pipeline: " - + Arrays.toString(getPipeline())); + + FanOutOneBlockAsyncDFSOutputHelper + .getDataNodeInfo(Arrays.stream(getPipeline()).collect(Collectors.toList()))); } // We got a new writer, so reset the slow sync count lastTimeCheckSlowSync = EnvironmentEdgeManager.currentTime(); From 35c7fc0bab4371fd5e726838ee093b933503cb39 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 6 Sep 2024 16:55:17 +0800 Subject: [PATCH 513/514] HBASE-28580 Revert the deprecation for methods in WALObserver (#6205) Signed-off-by: Nick Dimiduk --- .../hadoop/hbase/coprocessor/WALObserver.java | 30 +++++++++++-------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index bc57dbc735a4..768cc3955e56 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
@@ -56,25 +56,31 @@
 @InterfaceStability.Evolving
 public interface WALObserver {
   /**
-   * Called before a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is
-   * InterfaceAudience.Private. Changing the WALKey will cause damage.
-   * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose
-   *             InterfaceAudience classes such as WALKey and WALEdit. Will be removed in
-   *             hbase-3.0.0.
+   * Called before a {@link WALEdit} is written to WAL.
+   * <p/>
+   * The method is marked as deprecated in 2.0.0, but later we abstracted the WALKey interface for
+   * coprocessors, now it is OK to expose this to coprocessor users, so we revert the deprecation.
+   * But you still need to be careful while changing {@link WALEdit}, as when reaching here, if you
+   * add some cells to WALEdit, it will only be written to WAL but not in memstore, but when
+   * replaying you will get these cells and there are CP hooks to intercept these cells.
+   * <p/>
+   * See HBASE-28580.
    */
-  @Deprecated
   default void preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
     RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
   }
 
   /**
-   * Called after a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is
-   * InterfaceAudience.Private. Changing the WALKey will cause damage.
-   * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose
-   *             InterfaceAudience classes such as WALKey and WALEdit. Will be removed in
-   *             hbase-3.0.0.
+   * Called after a {@link WALEdit} is written to WAL.
+   * <p/>
+   * The method is marked as deprecated in 2.0.0, but later we abstracted the WALKey interface for
+   * coprocessors, now it is OK to expose this to coprocessor users, so we revert the deprecation.
+   * But you still need to be careful while changing {@link WALEdit}, as when reaching here, if you
+   * add some cells to WALEdit, it will only be written to WAL but not in memstore, but when
+   * replaying you will get these cells and there are CP hooks to intercept these cells.
+   * <p/>
    + * See HBASE-28580. */ - @Deprecated default void postWALWrite(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { } From 3bb09cbb4df95ae74d0fcb7c2fd889fc0cf7bb83 Mon Sep 17 00:00:00 2001 From: WangXin <1458451310@qq.com> Date: Fri, 6 Sep 2024 21:07:23 +0800 Subject: [PATCH 514/514] HBASE-28807 Remove some useless code and add some logs for CanaryTool (#6187) Co-authored-by: wangxin Signed-off-by: Nihal Jain (cherry picked from commit b161ad573e6ede257528a2b0802e07e6a6c8eccb) --- .../apache/hadoop/hbase/tool/CanaryTool.java | 34 ++++++++----------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 21e9edfe0688..f21edd176d4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -1734,26 +1734,22 @@ private static List> sniff(final Admin admin, final Sink sink, TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, boolean readAllCF) throws Exception { LOG.debug("Reading list of regions for table {}", tableDesc.getTableName()); - try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) { - List tasks = new ArrayList<>(); - try (RegionLocator regionLocator = - admin.getConnection().getRegionLocator(tableDesc.getTableName())) { - for (HRegionLocation location : regionLocator.getAllRegionLocations()) { - if (location == null) { - LOG.warn("Null location"); - continue; - } - ServerName rs = location.getServerName(); - RegionInfo region = location.getRegion(); - tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, - taskType, rawScanEnabled, rwLatency, readAllCF)); - Map> regionMap = ((RegionStdOutSink) sink).getRegionMap(); - regionMap.put(region.getRegionNameAsString(), new ArrayList()); + List tasks = new ArrayList<>(); + try (RegionLocator regionLocator = + admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + for (HRegionLocation location : regionLocator.getAllRegionLocations()) { + if (location == null) { + LOG.warn("Null location for table {}", tableDesc.getTableName()); + continue; } - return executor.invokeAll(tasks); + ServerName rs = location.getServerName(); + RegionInfo region = location.getRegion(); + tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, + taskType, rawScanEnabled, rwLatency, readAllCF)); + Map> regionMap = ((RegionStdOutSink) sink).getRegionMap(); + regionMap.put(region.getRegionNameAsString(), new ArrayList()); } - } catch (TableNotFoundException e) { - return Collections.EMPTY_LIST; + return executor.invokeAll(tasks); } } @@ -1959,7 +1955,7 @@ private Map> getAllRegionServerByName() { this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { - LOG.warn("Null location"); + LOG.warn("Null location for table {}", tableDesc.getTableName()); continue; } ServerName rs = location.getServerName();
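Since the HBASE-28580 revert above makes preWALWrite/postWALWrite first-class hooks again, here is a minimal, hypothetical WALObserver sketch showing read-only use of the key and edit. The class name and log message are invented; only APIs visible in the interface above are assumed, and such an observer would typically be registered via hbase.coprocessor.wal.classes.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.WALObserver;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingWALObserver implements WALCoprocessor, WALObserver {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingWALObserver.class);

  @Override
  public Optional<WALObserver> getWALObserver() {
    return Optional.of(this);
  }

  @Override
  public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
    RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
    // Read-only use of the key and edit; per the javadoc above, cells added here
    // would reach the WAL but not the memstore.
    LOG.debug("WAL append for region {} with {} cells", info.getEncodedName(), logEdit.size());
  }
}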